repository | repo_id | target_module_path | prompt | relavent_test_path | full_function | function_name
---|---|---|---|---|---|---
sphinx | 18 | sphinx/domains/javascript.py | def before_content(self) -> None:
"""Handle object nesting before content
:py:class:`JSObject` represents JavaScript language constructs. For
constructs that are nestable, this method will build up a stack of the
nesting hierarchy so that it can be later de-nested correctly, in
:py:meth:`after_content`.
For constructs that aren't nestable, the stack is bypassed, and instead
only the most recent object is tracked. This object prefix name will be
removed with :py:meth:`after_content`.
The following keys are used in ``self.env.ref_context``:
js:objects
Stores the object prefix history. With each nested element, we
add the object prefix to this list. When we exit that object's
nesting level, :py:meth:`after_content` is triggered and the
prefix is removed from the end of the list.
js:object
Current object prefix. This should generally reflect the last
element in the prefix history
"""
| /usr/src/app/target_test_cases/failed_tests_before_content.txt | def before_content(self) -> None:
"""Handle object nesting before content
:py:class:`JSObject` represents JavaScript language constructs. For
constructs that are nestable, this method will build up a stack of the
nesting hierarchy so that it can be later de-nested correctly, in
:py:meth:`after_content`.
For constructs that aren't nestable, the stack is bypassed, and instead
only the most recent object is tracked. This object prefix name will be
removed with :py:meth:`after_content`.
The following keys are used in ``self.env.ref_context``:
js:objects
Stores the object prefix history. With each nested element, we
add the object prefix to this list. When we exit that object's
nesting level, :py:meth:`after_content` is triggered and the
prefix is removed from the end of the list.
js:object
Current object prefix. This should generally reflect the last
element in the prefix history
"""
prefix = None
if self.names:
(obj_name, obj_name_prefix) = self.names.pop()
prefix = obj_name_prefix.strip('.') if obj_name_prefix else None
if self.allow_nesting:
prefix = obj_name
if prefix:
self.env.ref_context['js:object'] = prefix
if self.allow_nesting:
objects = self.env.ref_context.setdefault('js:objects', [])
objects.append(prefix)
| before_content |
sphinx | 19 | tests/test_builders/xpath_util.py | def check_xpath(
etree: ElementTree,
filename: str | os.PathLike[str],
xpath: str,
check: str | re.Pattern[str] | Callable[[Sequence[Element]], None] | None,
be_found: bool = True,
*,
min_count: int = 1,
) -> None:
"""Check that one or more nodes satisfy a predicate.
:param etree: The element tree.
:param filename: The element tree source name (for errors only).
:param xpath: An XPath expression to use.
:param check: Optional regular expression or a predicate the nodes must validate.
:param be_found: If false, negate the predicate.
:param min_count: Minimum number of nodes expected to satisfy the predicate.
* If *check* is empty (``''``), only the minimum count is checked.
* If *check* is ``None``, no node should satisfy the XPath expression.
"""
| /usr/src/app/target_test_cases/failed_tests_check_xpath.txt | def check_xpath(
etree: ElementTree,
filename: str | os.PathLike[str],
xpath: str,
check: str | re.Pattern[str] | Callable[[Sequence[Element]], None] | None,
be_found: bool = True,
*,
min_count: int = 1,
) -> None:
"""Check that one or more nodes satisfy a predicate.
:param etree: The element tree.
:param filename: The element tree source name (for errors only).
:param xpath: An XPath expression to use.
:param check: Optional regular expression or a predicate the nodes must validate.
:param be_found: If false, negate the predicate.
:param min_count: Minimum number of nodes expected to satisfy the predicate.
* If *check* is empty (``''``), only the minimum count is checked.
* If *check* is ``None``, no node should satisfy the XPath expression.
"""
nodes = etree.findall(xpath)
assert isinstance(nodes, list)
if check is None:
# use == to have a nice pytest diff
assert nodes == [], f'found nodes matching xpath {xpath!r} in file {filename}'
return
assert len(nodes) >= min_count, (
f'expecting at least {min_count} node(s) '
f'to satisfy {xpath!r} in file {filename}'
)
if check == '':
return
if callable(check):
check(nodes)
return
rex = re.compile(check)
if be_found:
if any(rex.search(_get_text(node)) for node in nodes):
return
else:
if all(not rex.search(_get_text(node)) for node in nodes):
return
ctx = textwrap.indent(_prettify(nodes), ' ' * 2)
msg = (
f'{check!r} not found in any node matching {xpath!r} in file {filename}:\n{ctx}'
)
raise AssertionError(msg)
| check_xpath |
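The helper above lives in Sphinx's own test suite rather than an installable package, so here is a minimal standalone sketch of the behaviour it documents, using only the standard library; the sample HTML, XPath, and pattern are invented for illustration.

```python
import re
from xml.etree import ElementTree as ET

# Hypothetical document; check_xpath itself receives an already-parsed ElementTree.
html = "<html><body><h1>Title</h1><p>first</p><p>second</p></body></html>"
tree = ET.ElementTree(ET.fromstring(html))

nodes = tree.findall(".//p")                 # the same lookup check_xpath performs
assert len(nodes) >= 1                       # the min_count check
pattern = re.compile("first")
assert any(pattern.search(node.text or "") for node in nodes)   # be_found=True case
```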
sphinx | 20 | sphinx/util/fileutil.py | def copy_asset(source: str | os.PathLike[str], destination: str | os.PathLike[str],
excluded: PathMatcher = lambda path: False,
context: dict[str, Any] | None = None, renderer: BaseRenderer | None = None,
onerror: Callable[[str, Exception], None] | None = None,
*, force: bool = False) -> None:
"""Copy asset files to destination recursively.
On copying, it expands the template variables if context argument is given and
the asset is a template file.
Use ``copy_asset_file`` instead to copy a single file.
:param source: The path to source file or directory
:param destination: The path to destination directory
:param excluded: The matcher that determines whether the given path should be copied
:param context: The template variables. If not given, template files are simply copied
:param renderer: The template engine. If not given, SphinxRenderer is used by default
:param onerror: The error handler.
:param bool force: Overwrite the destination file even if it exists.
"""
| /usr/src/app/target_test_cases/failed_tests_copy_asset.txt | def copy_asset(source: str | os.PathLike[str], destination: str | os.PathLike[str],
excluded: PathMatcher = lambda path: False,
context: dict[str, Any] | None = None, renderer: BaseRenderer | None = None,
onerror: Callable[[str, Exception], None] | None = None,
*, force: bool = False) -> None:
"""Copy asset files to destination recursively.
On copying, it expands the template variables if context argument is given and
the asset is a template file.
Use ``copy_asset_file`` instead to copy a single file.
:param source: The path to source file or directory
:param destination: The path to destination directory
:param excluded: The matcher that determines whether the given path should be copied
:param context: The template variables. If not given, template files are simply copied
:param renderer: The template engine. If not given, SphinxRenderer is used by default
:param onerror: The error handler.
:param bool force: Overwrite the destination file even if it exists.
"""
if not os.path.exists(source):
return
if renderer is None:
from sphinx.util.template import SphinxRenderer
renderer = SphinxRenderer()
ensuredir(destination)
if os.path.isfile(source):
copy_asset_file(source, destination,
context=context,
renderer=renderer,
force=force)
return
for root, dirs, files in os.walk(source, followlinks=True):
reldir = relative_path(source, root)
for dir in dirs.copy():
if excluded(posixpath.join(reldir, dir)):
dirs.remove(dir)
else:
ensuredir(posixpath.join(destination, reldir, dir))
for filename in files:
if not excluded(posixpath.join(reldir, filename)):
try:
copy_asset_file(posixpath.join(root, filename),
posixpath.join(destination, reldir),
context=context,
renderer=renderer,
force=force)
except Exception as exc:
if onerror:
onerror(posixpath.join(root, filename), exc)
else:
raise
| copy_asset |
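A hedged usage sketch for ``copy_asset``; the paths, exclusion rule, and template variables below are invented for illustration.

```python
from sphinx.util.fileutil import copy_asset

copy_asset(
    'assets/_static',                               # hypothetical source directory
    'build/html/_static',                           # hypothetical destination directory
    excluded=lambda path: path.endswith('.map'),    # e.g. skip source-map files
    context={'project': 'demo'},                    # variables used when rendering template assets
)
```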
sphinx | 21 | sphinx/ext/githubpages.py | def create_nojekyll_and_cname(app: Sphinx, env: BuildEnvironment) -> None:
"""Manage the ``.nojekyll`` and ``CNAME`` files for GitHub Pages.
For HTML-format builders (e.g. 'html', 'dirhtml') we unconditionally create
the ``.nojekyll`` file to signal that GitHub Pages should not run Jekyll
processing.
If the :confval:`html_baseurl` option is set, we also create a CNAME file
with the domain from ``html_baseurl``, so long as it is not a ``github.io``
domain.
If this extension is loaded and the domain in ``html_baseurl`` no longer
requires a CNAME file, we remove any existing ``CNAME`` files from the
output directory.
"""
| /usr/src/app/target_test_cases/failed_tests_githubpages.create_nojekyll_and_cname.txt | def create_nojekyll_and_cname(app: Sphinx, env: BuildEnvironment) -> None:
"""Manage the ``.nojekyll`` and ``CNAME`` files for GitHub Pages.
For HTML-format builders (e.g. 'html', 'dirhtml') we unconditionally create
the ``.nojekyll`` file to signal that GitHub Pages should not run Jekyll
processing.
If the :confval:`html_baseurl` option is set, we also create a CNAME file
with the domain from ``html_baseurl``, so long as it is not a ``github.io``
domain.
If this extension is loaded and the domain in ``html_baseurl`` no longer
requires a CNAME file, we remove any existing ``CNAME`` files from the
output directory.
"""
if app.builder.format != 'html':
return
app.builder.outdir.joinpath('.nojekyll').touch()
cname_path = os.path.join(app.builder.outdir, 'CNAME')
domain = _get_domain_from_url(app.config.html_baseurl)
# Filter out GitHub Pages domains, as they do not require CNAME files.
if domain and not domain.endswith(".github.io"):
with open(cname_path, 'w', encoding="utf-8") as f:
# NOTE: don't write a trailing newline. The `CNAME` file that's
# auto-generated by the GitHub UI doesn't have one.
f.write(domain)
else:
with contextlib.suppress(FileNotFoundError):
os.unlink(cname_path)
| create_nojekyll_and_cname |
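This function runs automatically once the extension is enabled; a minimal ``conf.py`` sketch follows, with a placeholder base URL.

```python
# conf.py
extensions = ['sphinx.ext.githubpages']

# With a custom (non-github.io) domain, the extension also writes a CNAME file.
html_baseurl = 'https://docs.example.com/'
```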
sphinx | 22 | sphinx/ext/doctest.py | def is_allowed_version(spec: str, version: str) -> bool:
"""Check `spec` satisfies `version` or not.
This obeys PEP-440 specifiers:
https://peps.python.org/pep-0440/#version-specifiers
Some examples:
>>> is_allowed_version('<=3.5', '3.3')
True
>>> is_allowed_version('<=3.2', '3.3')
False
>>> is_allowed_version('>3.2, <4.0', '3.3')
True
"""
| /usr/src/app/target_test_cases/failed_tests_is_allowed_version.txt | def is_allowed_version(spec: str, version: str) -> bool:
"""Check `spec` satisfies `version` or not.
This obeys PEP-440 specifiers:
https://peps.python.org/pep-0440/#version-specifiers
Some examples:
>>> is_allowed_version('<=3.5', '3.3')
True
>>> is_allowed_version('<=3.2', '3.3')
False
>>> is_allowed_version('>3.2, <4.0', '3.3')
True
"""
return Version(version) in SpecifierSet(spec)
| doctest.is_allowed_version |
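The check is a thin wrapper around the ``packaging`` library; an equivalent standalone sketch:

```python
from packaging.specifiers import SpecifierSet
from packaging.version import Version

def is_allowed_version(spec: str, version: str) -> bool:
    # True if *version* falls inside the PEP 440 specifier set *spec*.
    return Version(version) in SpecifierSet(spec)

assert is_allowed_version('<=3.5', '3.3')
assert not is_allowed_version('<=3.2', '3.3')
assert is_allowed_version('>3.2, <4.0', '3.3')
```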
sphinx | 23 | sphinx/util/docutils.py | def parse_inline(
self, text: str, *, lineno: int = -1,
) -> tuple[list[Node], list[system_message]]:
"""Parse *text* as inline elements.
:param text:
The text to parse, which should be a single line or paragraph.
This cannot contain any structural elements (headings,
transitions, directives, etc).
:param lineno:
The line number where the interpreted text begins.
:returns:
A list of nodes (text and inline elements) and a list of system_messages.
.. versionadded:: 7.4
"""
| /usr/src/app/target_test_cases/failed_tests_parse_inline.txt | def parse_inline(
self, text: str, *, lineno: int = -1,
) -> tuple[list[Node], list[system_message]]:
"""Parse *text* as inline elements.
:param text:
The text to parse, which should be a single line or paragraph.
This cannot contain any structural elements (headings,
transitions, directives, etc).
:param lineno:
The line number where the interpreted text begins.
:returns:
A list of nodes (text and inline elements) and a list of system_messages.
.. versionadded:: 7.4
"""
if lineno == -1:
lineno = self.lineno
return self.state.inline_text(text, lineno)
| docutils.SphinxDirective.parse_inline |
sphinx | 24 | sphinx/ext/autodoc/__init__.py | def filter_members(self, members: list[ObjectMember], want_all: bool,
) -> list[tuple[str, Any, bool]]:
"""Filter the given member list.
Members are skipped if
- they are private (except if given explicitly or the private-members
option is set)
- they are special methods (except if given explicitly or the
special-members option is set)
- they are undocumented (except if the undoc-members option is set)
The user can override the skipping decision by connecting to the
``autodoc-skip-member`` event.
"""
| /usr/src/app/target_test_cases/failed_tests_filter_members.txt | def filter_members(self, members: list[ObjectMember], want_all: bool,
) -> list[tuple[str, Any, bool]]:
"""Filter the given member list.
Members are skipped if
- they are private (except if given explicitly or the private-members
option is set)
- they are special methods (except if given explicitly or the
special-members option is set)
- they are undocumented (except if the undoc-members option is set)
The user can override the skipping decision by connecting to the
``autodoc-skip-member`` event.
"""
def is_filtered_inherited_member(name: str, obj: Any) -> bool:
inherited_members = self.options.inherited_members or set()
if inspect.isclass(self.object):
for cls in self.object.__mro__:
if cls.__name__ in inherited_members and cls != self.object:
# given member is a member of specified *super class*
return True
if name in cls.__dict__:
return False
if name in self.get_attr(cls, '__annotations__', {}):
return False
if isinstance(obj, ObjectMember) and obj.class_ is cls:
return False
return False
ret = []
# search for members in source code too
namespace = '.'.join(self.objpath) # will be empty for modules
if self.analyzer:
attr_docs = self.analyzer.find_attr_docs()
else:
attr_docs = {}
# process members and determine which to skip
for obj in members:
membername = obj.__name__
member = obj.object
# if isattr is True, the member is documented as an attribute
isattr = member is INSTANCEATTR or (namespace, membername) in attr_docs
try:
doc = getdoc(member, self.get_attr, self.config.autodoc_inherit_docstrings,
self.object, membername)
if not isinstance(doc, str):
# Ignore non-string __doc__
doc = None
# if the member __doc__ is the same as self's __doc__, it's just
# inherited and therefore not the member's doc
cls = self.get_attr(member, '__class__', None)
if cls:
cls_doc = self.get_attr(cls, '__doc__', None)
if cls_doc == doc:
doc = None
if isinstance(obj, ObjectMember) and obj.docstring:
# hack for ClassDocumenter to inject docstring via ObjectMember
doc = obj.docstring
doc, metadata = separate_metadata(doc)
has_doc = bool(doc)
if 'private' in metadata:
# consider a member private if docstring has "private" metadata
isprivate = True
elif 'public' in metadata:
# consider a member public if docstring has "public" metadata
isprivate = False
else:
isprivate = membername.startswith('_')
keep = False
if ismock(member) and (namespace, membername) not in attr_docs:
# mocked module or object
pass
elif (self.options.exclude_members and
membername in self.options.exclude_members):
# remove members given by exclude-members
keep = False
elif want_all and special_member_re.match(membername):
# special __methods__
if (self.options.special_members and
membername in self.options.special_members):
if membername == '__doc__': # NoQA: SIM114
keep = False
elif is_filtered_inherited_member(membername, obj):
keep = False
else:
keep = has_doc or self.options.undoc_members
else:
keep = False
elif (namespace, membername) in attr_docs:
if want_all and isprivate:
if self.options.private_members is None:
keep = False
else:
keep = membername in self.options.private_members
else:
# keep documented attributes
keep = True
elif want_all and isprivate:
if has_doc or self.options.undoc_members:
if self.options.private_members is None: # NoQA: SIM114
keep = False
elif is_filtered_inherited_member(membername, obj):
keep = False
else:
keep = membername in self.options.private_members
else:
keep = False
else:
if (self.options.members is ALL and
is_filtered_inherited_member(membername, obj)):
keep = False
else:
# ignore undocumented members if :undoc-members: is not given
keep = has_doc or self.options.undoc_members
if isinstance(obj, ObjectMember) and obj.skipped:
# forcedly skipped member (ex. a module attribute not defined in __all__)
keep = False
# give the user a chance to decide whether this member
# should be skipped
if self.env.app:
# let extensions preprocess docstrings
skip_user = self.env.app.emit_firstresult(
'autodoc-skip-member', self.objtype, membername, member,
not keep, self.options)
if skip_user is not None:
keep = not skip_user
except Exception as exc:
logger.warning(__('autodoc: failed to determine %s.%s (%r) to be documented, '
'the following exception was raised:\n%s'),
self.name, membername, member, exc, type='autodoc')
keep = False
if keep:
ret.append((membername, member, isattr))
return ret
| filter_members |
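As the docstring notes, the skipping decision can be overridden from an extension or ``conf.py`` via the ``autodoc-skip-member`` event; a hedged sketch, with an invented handler name:

```python
# conf.py
def skip_handler(app, what, name, obj, skip, options):
    if name == '__init__':
        return False      # force-document __init__
    return None           # otherwise fall back to autodoc's default decision

def setup(app):
    app.connect('autodoc-skip-member', skip_handler)
```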
sphinx | 25 | sphinx/util/parsing.py | def nested_parse_to_nodes(
state: RSTState,
text: str | StringList,
*,
source: str = '<generated text>',
offset: int = 0,
allow_section_headings: bool = True,
keep_title_context: bool = False,
) -> list[Node]: # Element | nodes.Text
"""Parse *text* into nodes.
:param state:
The state machine state. Must be a subclass of ``RSTState``.
:param text:
Text, in string form. ``StringList`` is also accepted.
:param source:
The text's source, used when creating a new ``StringList``.
:param offset:
The offset of the content.
:param allow_section_headings:
Are titles (sections) allowed in *text*?
Note that this option bypasses Docutils' usual checks on
doctree structure, and misuse of this option can lead to
an incoherent doctree. In Docutils, section nodes should
only be children of ``Structural`` nodes, which includes
``document``, ``section``, and ``sidebar`` nodes.
:param keep_title_context:
If this is False (the default), then *content* is parsed as if it were
an independent document, meaning that title decorations (e.g. underlines)
do not need to match the surrounding document.
This is useful when the parsed content comes from
a completely different context, such as docstrings.
If this is True, then title underlines must match those in
the surrounding document, otherwise the behaviour is undefined.
.. versionadded:: 7.4
"""
| /usr/src/app/target_test_cases/failed_tests_parsing.nested_parse_to_nodes.txt | def nested_parse_to_nodes(
state: RSTState,
text: str | StringList,
*,
source: str = '<generated text>',
offset: int = 0,
allow_section_headings: bool = True,
keep_title_context: bool = False,
) -> list[Node]: # Element | nodes.Text
"""Parse *text* into nodes.
:param state:
The state machine state. Must be a subclass of ``RSTState``.
:param text:
Text, in string form. ``StringList`` is also accepted.
:param source:
The text's source, used when creating a new ``StringList``.
:param offset:
The offset of the content.
:param allow_section_headings:
Are titles (sections) allowed in *text*?
Note that this option bypasses Docutils' usual checks on
doctree structure, and misuse of this option can lead to
an incoherent doctree. In Docutils, section nodes should
only be children of ``Structural`` nodes, which includes
``document``, ``section``, and ``sidebar`` nodes.
:param keep_title_context:
If this is False (the default), then *content* is parsed as if it were
an independent document, meaning that title decorations (e.g. underlines)
do not need to match the surrounding document.
This is useful when the parsed content comes from
a completely different context, such as docstrings.
If this is True, then title underlines must match those in
the surrounding document, otherwise the behaviour is undefined.
.. versionadded:: 7.4
"""
document = state.document
content = _text_to_string_list(
text, source=source, tab_width=document.settings.tab_width,
)
node = Element() # Anonymous container for parsing
node.document = document
if keep_title_context:
state.nested_parse(content, offset, node, match_titles=allow_section_headings)
else:
with _fresh_title_style_context(state):
state.nested_parse(content, offset, node, match_titles=allow_section_headings)
return node.children
| nested_parse_to_nodes |
sphinx | 26 | sphinx/domains/std/__init__.py | def note_hyperlink_target(self, name: str, docname: str, node_id: str,
title: str = '') -> None:
"""Add a hyperlink target for cross reference.
.. warning::
This is only for internal use. Please don't use this from your extension.
``document.note_explicit_target()`` or ``note_implicit_target()`` are recommended to
add a hyperlink target to the document.
    This only adds a hyperlink target to the StandardDomain; it does not add a
    node_id to the node. Calling this without understanding the hyperlink target
    framework in both docutils and Sphinx is therefore very fragile.
.. versionadded:: 3.0
"""
| /usr/src/app/target_test_cases/failed_tests_note_hyperlink_target.txt | def note_hyperlink_target(self, name: str, docname: str, node_id: str,
title: str = '') -> None:
"""Add a hyperlink target for cross reference.
.. warning::
This is only for internal use. Please don't use this from your extension.
``document.note_explicit_target()`` or ``note_implicit_target()`` are recommended to
add a hyperlink target to the document.
    This only adds a hyperlink target to the StandardDomain; it does not add a
    node_id to the node. Calling this without understanding the hyperlink target
    framework in both docutils and Sphinx is therefore very fragile.
.. versionadded:: 3.0
"""
if name in self.anonlabels and self.anonlabels[name] != (docname, node_id):
logger.warning(__('duplicate label %s, other instance in %s'),
name, self.env.doc2path(self.anonlabels[name][0]))
self.anonlabels[name] = (docname, node_id)
if title:
self.labels[name] = (docname, node_id, title)
| note_hyperlink_target |
sphinx | 27 | sphinx/util/docutils.py | def parse_content_to_nodes(self, allow_section_headings: bool = False) -> list[Node]:
"""Parse the directive's content into nodes.
:param allow_section_headings:
Are titles (sections) allowed in the directive's content?
Note that this option bypasses Docutils' usual checks on
doctree structure, and misuse of this option can lead to
an incoherent doctree. In Docutils, section nodes should
only be children of ``Structural`` nodes, which includes
``document``, ``section``, and ``sidebar`` nodes.
.. versionadded:: 7.4
"""
| /usr/src/app/target_test_cases/failed_tests_parse_content_to_nodes.txt | def parse_content_to_nodes(self, allow_section_headings: bool = False) -> list[Node]:
"""Parse the directive's content into nodes.
:param allow_section_headings:
Are titles (sections) allowed in the directive's content?
Note that this option bypasses Docutils' usual checks on
doctree structure, and misuse of this option can lead to
an incoherent doctree. In Docutils, section nodes should
only be children of ``Structural`` nodes, which includes
``document``, ``section``, and ``sidebar`` nodes.
.. versionadded:: 7.4
"""
return nested_parse_to_nodes(
self.state,
self.content,
offset=self.content_offset,
allow_section_headings=allow_section_headings,
)
| parse_content_to_nodes |
sphinx | 28 | sphinx/util/docutils.py | def parse_text_to_nodes(
self, text: str = '', /, *, offset: int = -1, allow_section_headings: bool = False,
) -> list[Node]:
"""Parse *text* into nodes.
:param text:
Text, in string form. ``StringList`` is also accepted.
:param allow_section_headings:
Are titles (sections) allowed in *text*?
Note that this option bypasses Docutils' usual checks on
doctree structure, and misuse of this option can lead to
an incoherent doctree. In Docutils, section nodes should
only be children of ``Structural`` nodes, which includes
``document``, ``section``, and ``sidebar`` nodes.
:param offset:
The offset of the content.
.. versionadded:: 7.4
"""
| /usr/src/app/target_test_cases/failed_tests_parse_text_to_nodes.txt | def parse_text_to_nodes(
self, text: str = '', /, *, offset: int = -1, allow_section_headings: bool = False,
) -> list[Node]:
"""Parse *text* into nodes.
:param text:
Text, in string form. ``StringList`` is also accepted.
:param allow_section_headings:
Are titles (sections) allowed in *text*?
Note that this option bypasses Docutils' usual checks on
doctree structure, and misuse of this option can lead to
an incoherent doctree. In Docutils, section nodes should
only be children of ``Structural`` nodes, which includes
``document``, ``section``, and ``sidebar`` nodes.
:param offset:
The offset of the content.
.. versionadded:: 7.4
"""
if offset == -1:
offset = self.content_offset
return nested_parse_to_nodes(
self.state,
text,
offset=offset,
allow_section_headings=allow_section_headings,
)
| parse_text_to_nodes |
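A hedged sketch of a custom directive that combines ``parse_content_to_nodes`` and ``parse_text_to_nodes``; the directive name and generated text are invented for illustration.

```python
from docutils import nodes
from sphinx.util.docutils import SphinxDirective


class HelloDirective(SphinxDirective):
    has_content = True

    def run(self) -> list[nodes.Node]:
        # Parse the directive body, then append one generated paragraph.
        result = self.parse_content_to_nodes()
        result += self.parse_text_to_nodes('*Generated* by the ``hello`` directive.')
        return result


def setup(app):
    app.add_directive('hello', HelloDirective)
    return {'parallel_read_safe': True, 'parallel_write_safe': True}
```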
sphinx | 29 | sphinx/transforms/i18n.py | def publish_msgstr(app: Sphinx, source: str, source_path: str, source_line: int,
config: Config, settings: Any) -> nodes.Element:
"""Publish msgstr (single line) into docutils document
:param sphinx.application.Sphinx app: sphinx application
:param str source: source text
:param str source_path: source path for warning indication
:param source_line: source line for warning indication
:param sphinx.config.Config config: sphinx config
:param docutils.frontend.Values settings: docutils settings
:return: document
:rtype: docutils.nodes.document
"""
| /usr/src/app/target_test_cases/failed_tests_publish_msgstr.txt | def publish_msgstr(app: Sphinx, source: str, source_path: str, source_line: int,
config: Config, settings: Any) -> nodes.Element:
"""Publish msgstr (single line) into docutils document
:param sphinx.application.Sphinx app: sphinx application
:param str source: source text
:param str source_path: source path for warning indication
:param source_line: source line for warning indication
:param sphinx.config.Config config: sphinx config
:param docutils.frontend.Values settings: docutils settings
:return: document
:rtype: docutils.nodes.document
"""
try:
# clear rst_prolog temporarily
rst_prolog = config.rst_prolog
config.rst_prolog = None
from sphinx.io import SphinxI18nReader
reader = SphinxI18nReader()
reader.setup(app)
filetype = get_filetype(config.source_suffix, source_path)
parser = app.registry.create_source_parser(app, filetype)
doc = reader.read(
source=StringInput(source=source,
source_path=f"{source_path}:{source_line}:<translated>"),
parser=parser,
settings=settings,
)
with contextlib.suppress(IndexError): # empty node
return doc[0]
return doc
finally:
config.rst_prolog = rst_prolog
| publish_msgstr |
sphinx | 30 | sphinx/ext/imgmath.py | def render_math(
self: HTML5Translator,
math: str,
) -> tuple[str | None, int | None]:
"""Render the LaTeX math expression *math* using latex and dvipng or
dvisvgm.
Return the image absolute filename and the "depth",
    that is, the distance between the image bottom and the baseline in pixels, if the
option to use preview_latex is switched on.
Error handling may seem strange, but follows a pattern: if LaTeX or dvipng
(dvisvgm) aren't available, only a warning is generated (since that enables
people on machines without these programs to at least build the rest of the
docs successfully). If the programs are there, however, they may not fail
since that indicates a problem in the math source.
"""
| /usr/src/app/target_test_cases/failed_tests_render_math.txt | def render_math(
self: HTML5Translator,
math: str,
) -> tuple[str | None, int | None]:
"""Render the LaTeX math expression *math* using latex and dvipng or
dvisvgm.
Return the image absolute filename and the "depth",
    that is, the distance between the image bottom and the baseline in pixels, if the
option to use preview_latex is switched on.
Error handling may seem strange, but follows a pattern: if LaTeX or dvipng
(dvisvgm) aren't available, only a warning is generated (since that enables
people on machines without these programs to at least build the rest of the
docs successfully). If the programs are there, however, they may not fail
since that indicates a problem in the math source.
"""
image_format = self.builder.config.imgmath_image_format.lower()
if image_format not in SUPPORT_FORMAT:
unsupported_format_msg = 'imgmath_image_format must be either "png" or "svg"'
raise MathExtError(unsupported_format_msg)
latex = generate_latex_macro(image_format,
math,
self.builder.config,
self.builder.confdir)
filename = f"{sha1(latex.encode(), usedforsecurity=False).hexdigest()}.{image_format}"
generated_path = path.join(self.builder.outdir, self.builder.imagedir, 'math', filename)
ensuredir(path.dirname(generated_path))
if path.isfile(generated_path):
if image_format == 'png':
depth = read_png_depth(generated_path)
elif image_format == 'svg':
depth = read_svg_depth(generated_path)
return generated_path, depth
# if latex or dvipng (dvisvgm) has failed once, don't bother to try again
if hasattr(self.builder, '_imgmath_warned_latex') or \
hasattr(self.builder, '_imgmath_warned_image_translator'):
return None, None
# .tex -> .dvi
try:
dvipath = compile_math(latex, self.builder)
except InvokeError:
self.builder._imgmath_warned_latex = True # type: ignore[attr-defined]
return None, None
# .dvi -> .png/.svg
try:
if image_format == 'png':
depth = convert_dvi_to_png(dvipath, self.builder, generated_path)
elif image_format == 'svg':
depth = convert_dvi_to_svg(dvipath, self.builder, generated_path)
except InvokeError:
self.builder._imgmath_warned_image_translator = True # type: ignore[attr-defined]
return None, None
return generated_path, depth
| render_math |
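Typical configuration that drives this code path, as a hedged ``conf.py`` sketch; the values are examples.

```python
# conf.py
extensions = ['sphinx.ext.imgmath']

imgmath_image_format = 'svg'   # or 'png'
imgmath_use_preview = True     # record depths so images align with the text baseline
```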
sphinx | 31 | sphinx/util/typing.py | def stringify_annotation(
annotation: Any,
/,
mode: _StringifyMode = 'fully-qualified-except-typing',
) -> str:
"""Stringify type annotation object.
    :param annotation: The annotation to be stringified.
    :param mode: Specify how annotations will be stringified.
'fully-qualified-except-typing'
Show the module name and qualified name of the annotation except
the "typing" module.
'smart'
Show the name of the annotation.
'fully-qualified'
Show the module name and qualified name of the annotation.
"""
| /usr/src/app/target_test_cases/failed_tests_stringify_annotation.txt | def stringify_annotation(
annotation: Any,
/,
mode: _StringifyMode = 'fully-qualified-except-typing',
) -> str:
"""Stringify type annotation object.
    :param annotation: The annotation to be stringified.
    :param mode: Specify how annotations will be stringified.
'fully-qualified-except-typing'
Show the module name and qualified name of the annotation except
the "typing" module.
'smart'
Show the name of the annotation.
'fully-qualified'
Show the module name and qualified name of the annotation.
"""
from sphinx.ext.autodoc.mock import ismock, ismockmodule # lazy loading
valid_modes = {'fully-qualified-except-typing', 'fully-qualified', 'smart'}
if mode not in valid_modes:
valid = ', '.join(map(repr, sorted(valid_modes)))
msg = f'mode must be one of {valid}; got {mode!r}'
raise ValueError(msg)
# things that are not types
if annotation is None or annotation == types.NoneType:
return 'None'
if annotation is Ellipsis:
return '...'
if isinstance(annotation, str):
if annotation.startswith("'") and annotation.endswith("'"):
# Might be a double Forward-ref'ed type. Go unquoting.
return annotation[1:-1]
return annotation
if not annotation:
return repr(annotation)
module_prefix = '~' if mode == 'smart' else ''
# The values below must be strings if the objects are well-formed.
annotation_qualname: str = getattr(annotation, '__qualname__', '')
annotation_module: str = getattr(annotation, '__module__', '')
annotation_name: str = getattr(annotation, '__name__', '')
annotation_module_is_typing = annotation_module == 'typing'
# Extract the annotation's base type by considering formattable cases
if isinstance(annotation, TypeVar) and not _is_unpack_form(annotation):
# typing_extensions.Unpack is incorrectly determined as a TypeVar
if annotation_module_is_typing and mode in {'fully-qualified-except-typing', 'smart'}:
return annotation_name
return module_prefix + f'{annotation_module}.{annotation_name}'
elif isinstance(annotation, NewType):
return module_prefix + f'{annotation_module}.{annotation_name}'
elif ismockmodule(annotation):
return module_prefix + annotation_name
elif ismock(annotation):
return module_prefix + f'{annotation_module}.{annotation_name}'
elif is_invalid_builtin_class(annotation):
return module_prefix + _INVALID_BUILTIN_CLASSES[annotation]
elif _is_annotated_form(annotation): # for py310+
pass
elif annotation_module == 'builtins' and annotation_qualname:
args = getattr(annotation, '__args__', None)
if args is None:
return annotation_qualname
# PEP 585 generic
if not args: # Empty tuple, list, ...
return repr(annotation)
concatenated_args = ', '.join(stringify_annotation(arg, mode) for arg in args)
return f'{annotation_qualname}[{concatenated_args}]'
else:
# add other special cases that can be directly formatted
pass
module_prefix = f'{annotation_module}.'
annotation_forward_arg: str | None = getattr(annotation, '__forward_arg__', None)
if annotation_qualname or (annotation_module_is_typing and not annotation_forward_arg):
if mode == 'smart':
module_prefix = f'~{module_prefix}'
if annotation_module_is_typing and mode == 'fully-qualified-except-typing':
module_prefix = ''
elif _is_unpack_form(annotation) and annotation_module == 'typing_extensions':
module_prefix = '~' if mode == 'smart' else ''
else:
module_prefix = ''
if annotation_module_is_typing:
if annotation_forward_arg:
# handle ForwardRefs
qualname = annotation_forward_arg
else:
if annotation_name:
qualname = annotation_name
elif annotation_qualname:
qualname = annotation_qualname
else:
# in this case, we know that the annotation is a member
# of ``typing`` and all of them define ``__origin__``
qualname = stringify_annotation(
annotation.__origin__, 'fully-qualified-except-typing',
).replace('typing.', '') # ex. Union
elif annotation_qualname:
qualname = annotation_qualname
elif hasattr(annotation, '__origin__'):
# instantiated generic provided by a user
qualname = stringify_annotation(annotation.__origin__, mode)
elif isinstance(annotation, types.UnionType):
qualname = 'types.UnionType'
else:
# we weren't able to extract the base type, appending arguments would
# only make them appear twice
return repr(annotation)
# Process the generic arguments (if any).
# They must be a list or a tuple, otherwise they are considered 'broken'.
annotation_args = getattr(annotation, '__args__', ())
if annotation_args and isinstance(annotation_args, list | tuple):
if (
qualname in {'Union', 'types.UnionType'}
and all(getattr(a, '__origin__', ...) is typing.Literal for a in annotation_args)
):
# special case to flatten a Union of Literals into a literal
flattened_args = typing.Literal[annotation_args].__args__ # type: ignore[attr-defined]
args = ', '.join(_format_literal_arg_stringify(a, mode=mode)
for a in flattened_args)
return f'{module_prefix}Literal[{args}]'
if qualname in {'Optional', 'Union', 'types.UnionType'}:
return ' | '.join(stringify_annotation(a, mode) for a in annotation_args)
elif qualname == 'Callable':
args = ', '.join(stringify_annotation(a, mode) for a in annotation_args[:-1])
returns = stringify_annotation(annotation_args[-1], mode)
return f'{module_prefix}Callable[[{args}], {returns}]'
elif qualname == 'Literal':
args = ', '.join(_format_literal_arg_stringify(a, mode=mode)
for a in annotation_args)
return f'{module_prefix}Literal[{args}]'
elif _is_annotated_form(annotation): # for py310+
args = stringify_annotation(annotation_args[0], mode)
meta_args = []
for m in annotation.__metadata__:
if isinstance(m, type):
meta_args.append(stringify_annotation(m, mode))
elif dataclasses.is_dataclass(m):
# use stringify_annotation for the repr of field values rather than repr
d_fields = ', '.join([
f"{f.name}={stringify_annotation(getattr(m, f.name), mode)}"
for f in dataclasses.fields(m) if f.repr
])
meta_args.append(f'{stringify_annotation(type(m), mode)}({d_fields})')
else:
meta_args.append(repr(m))
meta = ', '.join(meta_args)
if sys.version_info[:2] <= (3, 11):
if mode == 'fully-qualified-except-typing':
return f'Annotated[{args}, {meta}]'
module_prefix = module_prefix.replace('builtins', 'typing')
return f'{module_prefix}Annotated[{args}, {meta}]'
return f'{module_prefix}Annotated[{args}, {meta}]'
elif all(is_system_TypeVar(a) for a in annotation_args):
# Suppress arguments if all system defined TypeVars (ex. Dict[KT, VT])
return module_prefix + qualname
else:
args = ', '.join(stringify_annotation(a, mode) for a in annotation_args)
return f'{module_prefix}{qualname}[{args}]'
return module_prefix + qualname
| stringify_annotation |
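A hedged sketch of the three modes; the exact output can differ between Python and Sphinx versions.

```python
import pathlib
from typing import Optional

from sphinx.util.typing import stringify_annotation

print(stringify_annotation(dict[str, int]))                   # dict[str, int]
print(stringify_annotation(Optional[int]))                    # int | None
print(stringify_annotation(pathlib.Path))                     # pathlib.Path
print(stringify_annotation(pathlib.Path, 'smart'))            # ~pathlib.Path
print(stringify_annotation(pathlib.Path, 'fully-qualified'))  # pathlib.Path
```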
sphinx | 32 | sphinx/util/logging.py | def warning( # type: ignore[override]
self,
msg: object,
*args: object,
type: None | str = None,
subtype: None | str = None,
location: None | str | tuple[str | None, int | None] | Node = None,
nonl: bool = True,
color: str | None = None,
once: bool = False,
**kwargs: Any,
) -> None:
"""Log a sphinx warning.
It is recommended to include a ``type`` and ``subtype`` for warnings as
these can be displayed to the user using :confval:`show_warning_types`
and used in :confval:`suppress_warnings` to suppress specific warnings.
It is also recommended to specify a ``location`` whenever possible
to help users in correcting the warning.
:param msg: The message, which may contain placeholders for ``args``.
:param args: The arguments to substitute into ``msg``.
:param type: The type of the warning.
:param subtype: The subtype of the warning.
:param location: The source location of the warning's origin,
which can be a string (the ``docname`` or ``docname:lineno``),
a tuple of ``(docname, lineno)``,
or the docutils node object.
:param nonl: Whether to append a new line terminator to the message.
:param color: A color code for the message.
:param once: Do not log this warning,
        if a previous warning already has the same ``msg``, ``args`` and ``once=True``.
"""
| /usr/src/app/target_test_cases/failed_tests_warning.txt | def warning( # type: ignore[override]
self,
msg: object,
*args: object,
type: None | str = None,
subtype: None | str = None,
location: None | str | tuple[str | None, int | None] | Node = None,
nonl: bool = True,
color: str | None = None,
once: bool = False,
**kwargs: Any,
) -> None:
"""Log a sphinx warning.
It is recommended to include a ``type`` and ``subtype`` for warnings as
these can be displayed to the user using :confval:`show_warning_types`
and used in :confval:`suppress_warnings` to suppress specific warnings.
It is also recommended to specify a ``location`` whenever possible
to help users in correcting the warning.
:param msg: The message, which may contain placeholders for ``args``.
:param args: The arguments to substitute into ``msg``.
:param type: The type of the warning.
:param subtype: The subtype of the warning.
:param location: The source location of the warning's origin,
which can be a string (the ``docname`` or ``docname:lineno``),
a tuple of ``(docname, lineno)``,
or the docutils node object.
:param nonl: Whether to append a new line terminator to the message.
:param color: A color code for the message.
:param once: Do not log this warning,
        if a previous warning already has the same ``msg``, ``args`` and ``once=True``.
"""
return super().warning(
msg,
*args,
type=type,
subtype=subtype,
location=location,
nonl=nonl,
color=color,
once=once,
**kwargs,
)
| warning |
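A hedged sketch of how an extension typically emits such a warning; the handler, type, and subtype names are invented.

```python
from sphinx.util import logging

logger = logging.getLogger(__name__)


def check_captions(app, doctree, docname):
    # Can then be silenced via suppress_warnings = ["mydomain.caption"] in conf.py.
    logger.warning(
        'figure without a caption in %s', docname,
        type='mydomain', subtype='caption',
        location=docname,
    )
```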
xarray | 0 | xarray/backends/file_manager.py | def acquire(self, needs_lock=True):
"""Acquire a file object from the manager.
A new file is only opened if it has expired from the
least-recently-used cache.
This method uses a lock, which ensures that it is thread-safe. You can
safely acquire a file in multiple threads at the same time, as long as
the underlying file object is thread-safe.
Returns
-------
file-like
An open file object, as returned by ``opener(*args, **kwargs)``.
"""
| /usr/src/app/target_test_cases/failed_tests_CachingFileManager.acquire.txt | def acquire(self, needs_lock=True):
"""Acquire a file object from the manager.
A new file is only opened if it has expired from the
least-recently-used cache.
This method uses a lock, which ensures that it is thread-safe. You can
safely acquire a file in multiple threads at the same time, as long as
the underlying file object is thread-safe.
Returns
-------
file-like
An open file object, as returned by ``opener(*args, **kwargs)``.
"""
file, _ = self._acquire_with_cache_info(needs_lock)
return file
| CachingFileManager.acquire |
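A hedged sketch that wraps the built-in ``open`` as the opener; the file name is a placeholder.

```python
from xarray.backends.file_manager import CachingFileManager

manager = CachingFileManager(open, 'example.nc', mode='rb')
f = manager.acquire()      # opened now, or reused from the LRU cache
magic = f.read(4)          # e.g. b'CDF\x01' or b'\x89HDF' for netCDF files
manager.close()
```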
xarray | 1 | xarray/core/coordinates.py | def assign(self, coords: Mapping | None = None, **coords_kwargs: Any) -> Self:
"""Assign new coordinates (and indexes) to a Coordinates object, returning
a new object with all the original coordinates in addition to the new ones.
Parameters
----------
coords : mapping of dim to coord, optional
A mapping whose keys are the names of the coordinates and values are the
coordinates to assign. The mapping will generally be a dict or
:class:`Coordinates`.
* If a value is a standard data value — for example, a ``DataArray``,
scalar, or array — the data is simply assigned as a coordinate.
* A coordinate can also be defined and attached to an existing dimension
using a tuple with the first element the dimension name and the second
element the values for this new coordinate.
**coords_kwargs
The keyword arguments form of ``coords``.
One of ``coords`` or ``coords_kwargs`` must be provided.
Returns
-------
new_coords : Coordinates
A new Coordinates object with the new coordinates (and indexes)
in addition to all the existing coordinates.
Examples
--------
>>> coords = xr.Coordinates()
>>> coords
Coordinates:
*empty*
>>> coords.assign(x=[1, 2])
Coordinates:
* x (x) int64 16B 1 2
>>> midx = pd.MultiIndex.from_product([["a", "b"], [0, 1]])
>>> coords.assign(xr.Coordinates.from_pandas_multiindex(midx, "y"))
Coordinates:
* y (y) object 32B MultiIndex
* y_level_0 (y) object 32B 'a' 'a' 'b' 'b'
* y_level_1 (y) int64 32B 0 1 0 1
"""
| /usr/src/app/target_test_cases/failed_tests_Coordinates.assign.txt | def assign(self, coords: Mapping | None = None, **coords_kwargs: Any) -> Self:
"""Assign new coordinates (and indexes) to a Coordinates object, returning
a new object with all the original coordinates in addition to the new ones.
Parameters
----------
coords : mapping of dim to coord, optional
A mapping whose keys are the names of the coordinates and values are the
coordinates to assign. The mapping will generally be a dict or
:class:`Coordinates`.
* If a value is a standard data value — for example, a ``DataArray``,
scalar, or array — the data is simply assigned as a coordinate.
* A coordinate can also be defined and attached to an existing dimension
using a tuple with the first element the dimension name and the second
element the values for this new coordinate.
**coords_kwargs
The keyword arguments form of ``coords``.
One of ``coords`` or ``coords_kwargs`` must be provided.
Returns
-------
new_coords : Coordinates
A new Coordinates object with the new coordinates (and indexes)
in addition to all the existing coordinates.
Examples
--------
>>> coords = xr.Coordinates()
>>> coords
Coordinates:
*empty*
>>> coords.assign(x=[1, 2])
Coordinates:
* x (x) int64 16B 1 2
>>> midx = pd.MultiIndex.from_product([["a", "b"], [0, 1]])
>>> coords.assign(xr.Coordinates.from_pandas_multiindex(midx, "y"))
Coordinates:
* y (y) object 32B MultiIndex
* y_level_0 (y) object 32B 'a' 'a' 'b' 'b'
* y_level_1 (y) int64 32B 0 1 0 1
"""
# TODO: this doesn't support a callable, which is inconsistent with `DataArray.assign_coords`
coords = either_dict_or_kwargs(coords, coords_kwargs, "assign")
new_coords = self.copy()
new_coords.update(coords)
return new_coords
| Coordinates.assign |
xarray | 2 | xarray/core/datatree.py | def to_dataset(self, inherited: bool = True) -> Dataset:
"""
Return the data in this node as a new xarray.Dataset object.
Parameters
----------
inherited : bool, optional
If False, only include coordinates and indexes defined at the level
of this DataTree node, excluding any inherited coordinates and indexes.
See Also
--------
DataTree.dataset
"""
| /usr/src/app/target_test_cases/failed_tests_DataTree.to_dataset.txt | def to_dataset(self, inherited: bool = True) -> Dataset:
"""
Return the data in this node as a new xarray.Dataset object.
Parameters
----------
inherited : bool, optional
If False, only include coordinates and indexes defined at the level
of this DataTree node, excluding any inherited coordinates and indexes.
See Also
--------
DataTree.dataset
"""
coord_vars = self._coord_variables if inherited else self._node_coord_variables
variables = dict(self._data_variables)
variables |= coord_vars
dims = calculate_dimensions(variables) if inherited else dict(self._node_dims)
return Dataset._construct_direct(
variables,
set(coord_vars),
dims,
None if self._attrs is None else dict(self._attrs),
dict(self._indexes if inherited else self._node_indexes),
None if self._encoding is None else dict(self._encoding),
self._close,
)
| DataTree.to_dataset |
xarray | 3 | xarray/core/treenode.py | def _set_item(
self: Tree,
path: str | NodePath,
item: Tree | T_DataArray,
new_nodes_along_path: bool = False,
allow_overwrite: bool = True,
) -> None:
"""
Set a new item in the tree, overwriting anything already present at that path.
The given value either forms a new node of the tree or overwrites an
existing item at that location.
Parameters
----------
path
item
new_nodes_along_path : bool
If true, then if necessary new nodes will be created along the
given path, until the tree can reach the specified location.
allow_overwrite : bool
Whether or not to overwrite any existing node at the location given
by path.
Raises
------
KeyError
If node cannot be reached, and new_nodes_along_path=False.
Or if a node already exists at the specified path, and allow_overwrite=False.
"""
| /usr/src/app/target_test_cases/failed_tests_TreeNode._set_item.txt | def _set_item(
self: Tree,
path: str | NodePath,
item: Tree | T_DataArray,
new_nodes_along_path: bool = False,
allow_overwrite: bool = True,
) -> None:
"""
Set a new item in the tree, overwriting anything already present at that path.
The given value either forms a new node of the tree or overwrites an
existing item at that location.
Parameters
----------
path
item
new_nodes_along_path : bool
If true, then if necessary new nodes will be created along the
given path, until the tree can reach the specified location.
allow_overwrite : bool
Whether or not to overwrite any existing node at the location given
by path.
Raises
------
KeyError
If node cannot be reached, and new_nodes_along_path=False.
Or if a node already exists at the specified path, and allow_overwrite=False.
"""
if isinstance(path, str):
path = NodePath(path)
if not path.name:
raise ValueError("Can't set an item under a path which has no name")
if path.root:
# absolute path
current_node = self.root
root, *parts, name = path.parts
else:
# relative path
current_node = self
*parts, name = path.parts
if parts:
# Walk to location of new node, creating intermediate node objects as we go if necessary
for part in parts:
if part == "..":
if current_node.parent is None:
# We can't create a parent if `new_nodes_along_path=True` as we wouldn't know what to name it
raise KeyError(f"Could not reach node at path {path}")
else:
current_node = current_node.parent
elif part in ("", "."):
pass
else:
if part in current_node.children:
current_node = current_node.children[part]
elif new_nodes_along_path:
# Want child classes (i.e. DataTree) to populate tree with their own types
new_node = type(self)()
current_node._set(part, new_node)
current_node = current_node.children[part]
else:
raise KeyError(f"Could not reach node at path {path}")
if name in current_node.children:
# Deal with anything already existing at this location
if allow_overwrite:
current_node._set(name, item)
else:
raise KeyError(f"Already a node object at path {path}")
else:
current_node._set(name, item)
| TreeNode._set_item |
xarray | 4 | xarray/backends/file_manager.py | def __init__(
self,
opener,
*args,
mode=_DEFAULT_MODE,
kwargs=None,
lock=None,
cache=None,
manager_id: Hashable | None = None,
ref_counts=None,
):
"""Initialize a CachingFileManager.
The cache, manager_id and ref_counts arguments exist solely to
facilitate dependency injection, and should only be set for tests.
Parameters
----------
opener : callable
Function that when called like ``opener(*args, **kwargs)`` returns
an open file object. The file object must implement a ``close()``
method.
*args
Positional arguments for opener. A ``mode`` argument should be
provided as a keyword argument (see below). All arguments must be
hashable.
mode : optional
If provided, passed as a keyword argument to ``opener`` along with
        ``**kwargs``. ``mode='w'`` has special treatment: after the first
        call it is replaced by ``mode='a'`` in all subsequent function calls to
avoid overriding the newly created file.
kwargs : dict, optional
Keyword arguments for opener, excluding ``mode``. All values must
be hashable.
lock : duck-compatible threading.Lock, optional
Lock to use when modifying the cache inside acquire() and close().
By default, uses a new threading.Lock() object. If set, this object
should be pickleable.
cache : MutableMapping, optional
Mapping to use as a cache for open files. By default, uses xarray's
global LRU file cache. Because ``cache`` typically points to a
global variable and contains non-picklable file objects, an
unpickled FileManager objects will be restored with the default
cache.
manager_id : hashable, optional
Identifier for this CachingFileManager.
ref_counts : dict, optional
Optional dict to use for keeping track the number of references to
the same file.
"""
| /usr/src/app/target_test_cases/failed_tests___init__.txt | def __init__(
self,
opener,
*args,
mode=_DEFAULT_MODE,
kwargs=None,
lock=None,
cache=None,
manager_id: Hashable | None = None,
ref_counts=None,
):
"""Initialize a CachingFileManager.
The cache, manager_id and ref_counts arguments exist solely to
facilitate dependency injection, and should only be set for tests.
Parameters
----------
opener : callable
Function that when called like ``opener(*args, **kwargs)`` returns
an open file object. The file object must implement a ``close()``
method.
*args
Positional arguments for opener. A ``mode`` argument should be
provided as a keyword argument (see below). All arguments must be
hashable.
mode : optional
If provided, passed as a keyword argument to ``opener`` along with
        ``**kwargs``. ``mode='w'`` has special treatment: after the first
        call it is replaced by ``mode='a'`` in all subsequent function calls to
avoid overriding the newly created file.
kwargs : dict, optional
Keyword arguments for opener, excluding ``mode``. All values must
be hashable.
lock : duck-compatible threading.Lock, optional
Lock to use when modifying the cache inside acquire() and close().
By default, uses a new threading.Lock() object. If set, this object
should be pickleable.
cache : MutableMapping, optional
Mapping to use as a cache for open files. By default, uses xarray's
global LRU file cache. Because ``cache`` typically points to a
global variable and contains non-picklable file objects, an
unpickled FileManager objects will be restored with the default
cache.
manager_id : hashable, optional
Identifier for this CachingFileManager.
ref_counts : dict, optional
Optional dict to use for keeping track the number of references to
the same file.
"""
self._opener = opener
self._args = args
self._mode = mode
self._kwargs = {} if kwargs is None else dict(kwargs)
self._use_default_lock = lock is None or lock is False
self._lock = threading.Lock() if self._use_default_lock else lock
# cache[self._key] stores the file associated with this object.
if cache is None:
cache = FILE_CACHE
self._cache = cache
if manager_id is None:
# Each call to CachingFileManager should separately open files.
manager_id = str(uuid.uuid4())
self._manager_id = manager_id
self._key = self._make_key()
# ref_counts[self._key] stores the number of CachingFileManager objects
# in memory referencing this same file. We use this to know if we can
# close a file when the manager is deallocated.
if ref_counts is None:
ref_counts = REF_COUNTS
self._ref_counter = _RefCounter(ref_counts)
self._ref_counter.increment(self._key)
| __init__ |
xarray | 5 | xarray/core/resample_cftime.py | def _adjust_dates_anchored(
first: CFTimeDatetime,
last: CFTimeDatetime,
freq: Tick,
closed: SideOptions = "right",
origin: str | CFTimeDatetime = "start_day",
offset: datetime.timedelta | None = None,
):
"""First and last offsets should be calculated from the start day to fix
    an error caused by resampling across multiple days when a one-day period is
not a multiple of the frequency.
See https://github.com/pandas-dev/pandas/issues/8683
Parameters
----------
first : cftime.datetime
A datetime object representing the start of a CFTimeIndex range.
last : cftime.datetime
A datetime object representing the end of a CFTimeIndex range.
freq : xarray.coding.cftime_offsets.BaseCFTimeOffset
The offset object representing target conversion a.k.a. resampling
frequency. Contains information on offset type (e.g. Day or 'D') and
offset magnitude (e.g., n = 3).
closed : 'left' or 'right'
Which side of bin interval is closed. Defaults to 'right'.
origin : {'epoch', 'start', 'start_day', 'end', 'end_day'} or cftime.datetime, default 'start_day'
The datetime on which to adjust the grouping. The timezone of origin
must match the timezone of the index.
If a datetime is not used, these values are also supported:
- 'epoch': `origin` is 1970-01-01
- 'start': `origin` is the first value of the timeseries
- 'start_day': `origin` is the first day at midnight of the timeseries
- 'end': `origin` is the last value of the timeseries
- 'end_day': `origin` is the ceiling midnight of the last day
offset : datetime.timedelta, default is None
An offset timedelta added to the origin.
Returns
-------
fresult : cftime.datetime
A datetime object representing the start of a date range that has been
adjusted to fix resampling errors.
lresult : cftime.datetime
A datetime object representing the end of a date range that has been
adjusted to fix resampling errors.
"""
| /usr/src/app/target_test_cases/failed_tests__adjust_dates_anchored.txt | def _adjust_dates_anchored(
first: CFTimeDatetime,
last: CFTimeDatetime,
freq: Tick,
closed: SideOptions = "right",
origin: str | CFTimeDatetime = "start_day",
offset: datetime.timedelta | None = None,
):
"""First and last offsets should be calculated from the start day to fix
    an error caused by resampling across multiple days when a one-day period is
not a multiple of the frequency.
See https://github.com/pandas-dev/pandas/issues/8683
Parameters
----------
first : cftime.datetime
A datetime object representing the start of a CFTimeIndex range.
last : cftime.datetime
A datetime object representing the end of a CFTimeIndex range.
freq : xarray.coding.cftime_offsets.BaseCFTimeOffset
The offset object representing target conversion a.k.a. resampling
frequency. Contains information on offset type (e.g. Day or 'D') and
offset magnitude (e.g., n = 3).
closed : 'left' or 'right'
Which side of bin interval is closed. Defaults to 'right'.
origin : {'epoch', 'start', 'start_day', 'end', 'end_day'} or cftime.datetime, default 'start_day'
The datetime on which to adjust the grouping. The timezone of origin
must match the timezone of the index.
If a datetime is not used, these values are also supported:
- 'epoch': `origin` is 1970-01-01
- 'start': `origin` is the first value of the timeseries
- 'start_day': `origin` is the first day at midnight of the timeseries
- 'end': `origin` is the last value of the timeseries
- 'end_day': `origin` is the ceiling midnight of the last day
offset : datetime.timedelta, default is None
An offset timedelta added to the origin.
Returns
-------
fresult : cftime.datetime
A datetime object representing the start of a date range that has been
adjusted to fix resampling errors.
lresult : cftime.datetime
A datetime object representing the end of a date range that has been
adjusted to fix resampling errors.
"""
import cftime
if origin == "start_day":
origin_date = normalize_date(first)
elif origin == "start":
origin_date = first
elif origin == "epoch":
origin_date = type(first)(1970, 1, 1)
elif origin in ["end", "end_day"]:
origin_last = last if origin == "end" else _ceil_via_cftimeindex(last, "D")
sub_freq_times = (origin_last - first) // freq.as_timedelta()
if closed == "left":
sub_freq_times += 1
first = origin_last - sub_freq_times * freq
origin_date = first
elif isinstance(origin, cftime.datetime):
origin_date = origin
else:
raise ValueError(
f"origin must be one of {{'epoch', 'start_day', 'start', 'end', 'end_day'}} "
f"or a cftime.datetime object. Got {origin}."
)
if offset is not None:
origin_date = origin_date + offset
foffset = (first - origin_date) % freq.as_timedelta()
loffset = (last - origin_date) % freq.as_timedelta()
if closed == "right":
if foffset.total_seconds() > 0:
fresult = first - foffset
else:
fresult = first - freq.as_timedelta()
if loffset.total_seconds() > 0:
lresult = last + (freq.as_timedelta() - loffset)
else:
lresult = last
else:
if foffset.total_seconds() > 0:
fresult = first - foffset
else:
fresult = first
if loffset.total_seconds() > 0:
lresult = last + (freq.as_timedelta() - loffset)
else:
lresult = last + freq
return fresult, lresult
| _adjust_dates_anchored |
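The effect of the anchoring above is easiest to see through the public resample API. A minimal sketch, assuming cftime is installed and an xarray version whose resample accepts origin for CFTimeIndex-backed data (frequency aliases and keyword support may vary by version):
import numpy as np
import xarray as xr
# Hourly cftime series starting mid-morning and spanning two and a half days.
times = xr.cftime_range("2000-01-01T06:00", periods=60, freq="h", calendar="noleap")
da = xr.DataArray(np.arange(60), coords={"time": times}, dims="time")
# origin="start_day" anchors the 24h bins at midnight of the first day,
# origin="start" anchors them at the first sample (06:00).
print(da.resample(time="24h", origin="start_day").mean()["time"].values[0])
print(da.resample(time="24h", origin="start").mean()["time"].values[0])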
xarray | 6 | xarray/core/combine.py | def _combine_nd(
combined_ids,
concat_dims,
data_vars="all",
coords="different",
compat: CompatOptions = "no_conflicts",
fill_value=dtypes.NA,
join: JoinOptions = "outer",
combine_attrs: CombineAttrsOptions = "drop",
):
"""
Combines an N-dimensional structure of datasets into one by applying a
series of either concat and merge operations along each dimension.
No checks are performed on the consistency of the datasets, concat_dims or
tile_IDs, because it is assumed that this has already been done.
Parameters
----------
combined_ids : Dict[Tuple[int, ...], xarray.Dataset]
Structure containing all datasets to be concatenated with "tile_IDs" as
keys, which specify position within the desired final combined result.
concat_dims : sequence of str
The dimensions along which the datasets should be concatenated. Must be
in order, and the length must match the length of the tuples used as
keys in combined_ids. If the string is a dimension name then concat
along that dimension, if it is None then merge.
Returns
-------
combined_ds : xarray.Dataset
"""
| /usr/src/app/target_test_cases/failed_tests__combine_nd.txt | def _combine_nd(
combined_ids,
concat_dims,
data_vars="all",
coords="different",
compat: CompatOptions = "no_conflicts",
fill_value=dtypes.NA,
join: JoinOptions = "outer",
combine_attrs: CombineAttrsOptions = "drop",
):
"""
Combines an N-dimensional structure of datasets into one by applying a
series of either concat and merge operations along each dimension.
No checks are performed on the consistency of the datasets, concat_dims or
tile_IDs, because it is assumed that this has already been done.
Parameters
----------
combined_ids : Dict[Tuple[int, ...], xarray.Dataset]
Structure containing all datasets to be concatenated with "tile_IDs" as
keys, which specify position within the desired final combined result.
concat_dims : sequence of str
The dimensions along which the datasets should be concatenated. Must be
in order, and the length must match the length of the tuples used as
keys in combined_ids. If the string is a dimension name then concat
along that dimension, if it is None then merge.
Returns
-------
combined_ds : xarray.Dataset
"""
example_tile_id = next(iter(combined_ids.keys()))
n_dims = len(example_tile_id)
if len(concat_dims) != n_dims:
raise ValueError(
f"concat_dims has length {len(concat_dims)} but the datasets "
f"passed are nested in a {n_dims}-dimensional structure"
)
# Each iteration of this loop reduces the length of the tile_ids tuples
# by one. It always combines along the first dimension, removing the first
# element of the tuple
for concat_dim in concat_dims:
combined_ids = _combine_all_along_first_dim(
combined_ids,
dim=concat_dim,
data_vars=data_vars,
coords=coords,
compat=compat,
fill_value=fill_value,
join=join,
combine_attrs=combine_attrs,
)
(combined_ds,) = combined_ids.values()
return combined_ds
| _combine_nd |
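The public entry point that drives this tiling is combine_nested. A small sketch using only core xarray; the tile layout and names are made up for illustration:
import numpy as np
import xarray as xr
def tile(x0, y0):
    # One 2x2 tile of a larger grid, positioned at (x0, y0).
    return xr.Dataset(
        {"t": (("x", "y"), np.zeros((2, 2)) + 10 * x0 + y0)},
        coords={"x": [x0, x0 + 1], "y": [y0, y0 + 1]},
    )
# The outer list varies along "x", the inner lists vary along "y".
grid = [[tile(0, 0), tile(0, 2)], [tile(2, 0), tile(2, 2)]]
combined = xr.combine_nested(grid, concat_dim=["x", "y"])
print(combined.sizes)  # x: 4, y: 4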
xarray | 7 | xarray/core/indexing.py | def _decompose_outer_indexer(
indexer: BasicIndexer | OuterIndexer,
shape: _Shape,
indexing_support: IndexingSupport,
) -> tuple[ExplicitIndexer, ExplicitIndexer]:
"""
Decompose outer indexer to the successive two indexers, where the
first indexer will be used to index backend arrays, while the second one
is used to index the loaded on-memory np.ndarray.
Parameters
----------
indexer : OuterIndexer or BasicIndexer
indexing_support : One of the entries of IndexingSupport
Returns
-------
backend_indexer: OuterIndexer or BasicIndexer
np_indexers: an ExplicitIndexer (OuterIndexer / BasicIndexer)
Notes
-----
This function is used to realize the vectorized indexing for the backend
arrays that only support basic or outer indexing.
As an example, let us consider indexing a few elements from a backend array
with an orthogonal indexer ([0, 2, 1], [2, 3, 2]).
Even if the backend array only supports basic indexing, it is more
efficient to load a subslice of the array than loading the entire array,
>>> array = np.arange(36).reshape(6, 6)
>>> backend_indexer = BasicIndexer((slice(0, 3), slice(2, 4)))
>>> # load subslice of the array
... array = NumpyIndexingAdapter(array)[backend_indexer]
>>> np_indexer = OuterIndexer((np.array([0, 2, 1]), np.array([0, 1, 0])))
>>> # outer indexing for on-memory np.ndarray.
... NumpyIndexingAdapter(array).oindex[np_indexer]
array([[ 2, 3, 2],
[14, 15, 14],
[ 8, 9, 8]])
"""
| /usr/src/app/target_test_cases/failed_tests__decompose_outer_indexer.txt | def _decompose_outer_indexer(
indexer: BasicIndexer | OuterIndexer,
shape: _Shape,
indexing_support: IndexingSupport,
) -> tuple[ExplicitIndexer, ExplicitIndexer]:
"""
Decompose outer indexer to the successive two indexers, where the
first indexer will be used to index backend arrays, while the second one
is used to index the loaded on-memory np.ndarray.
Parameters
----------
indexer : OuterIndexer or BasicIndexer
indexing_support : One of the entries of IndexingSupport
Returns
-------
backend_indexer: OuterIndexer or BasicIndexer
np_indexers: an ExplicitIndexer (OuterIndexer / BasicIndexer)
Notes
-----
This function is used to realize the vectorized indexing for the backend
arrays that only support basic or outer indexing.
As an example, let us consider indexing a few elements from a backend array
with an orthogonal indexer ([0, 2, 1], [2, 3, 2]).
Even if the backend array only supports basic indexing, it is more
efficient to load a subslice of the array than loading the entire array,
>>> array = np.arange(36).reshape(6, 6)
>>> backend_indexer = BasicIndexer((slice(0, 3), slice(2, 4)))
>>> # load subslice of the array
... array = NumpyIndexingAdapter(array)[backend_indexer]
>>> np_indexer = OuterIndexer((np.array([0, 2, 1]), np.array([0, 1, 0])))
>>> # outer indexing for on-memory np.ndarray.
... NumpyIndexingAdapter(array).oindex[np_indexer]
array([[ 2, 3, 2],
[14, 15, 14],
[ 8, 9, 8]])
"""
backend_indexer: list[Any] = []
np_indexer: list[Any] = []
assert isinstance(indexer, OuterIndexer | BasicIndexer)
if indexing_support == IndexingSupport.VECTORIZED:
for k, s in zip(indexer.tuple, shape, strict=False):
if isinstance(k, slice):
# If it is a slice, then we will slice it as-is
# (but make its step positive) in the backend,
bk_slice, np_slice = _decompose_slice(k, s)
backend_indexer.append(bk_slice)
np_indexer.append(np_slice)
else:
backend_indexer.append(k)
if not is_scalar(k):
np_indexer.append(slice(None))
return type(indexer)(tuple(backend_indexer)), BasicIndexer(tuple(np_indexer))
# make indexer positive
pos_indexer: list[np.ndarray | int | np.number] = []
for k, s in zip(indexer.tuple, shape, strict=False):
if isinstance(k, np.ndarray):
pos_indexer.append(np.where(k < 0, k + s, k))
elif isinstance(k, integer_types) and k < 0:
pos_indexer.append(k + s)
else:
pos_indexer.append(k)
indexer_elems = pos_indexer
if indexing_support is IndexingSupport.OUTER_1VECTOR:
# some backends such as h5py support only 1 vector in indexers
# We choose the most efficient axis
gains = [
(
(np.max(k) - np.min(k) + 1.0) / len(np.unique(k))
if isinstance(k, np.ndarray)
else 0
)
for k in indexer_elems
]
array_index = np.argmax(np.array(gains)) if len(gains) > 0 else None
for i, (k, s) in enumerate(zip(indexer_elems, shape, strict=False)):
if isinstance(k, np.ndarray) and i != array_index:
# np.ndarray key is converted to slice that covers the entire
# entries of this key.
backend_indexer.append(slice(np.min(k), np.max(k) + 1))
np_indexer.append(k - np.min(k))
elif isinstance(k, np.ndarray):
# Remove duplicates and sort them in the increasing order
pkey, ekey = np.unique(k, return_inverse=True)
backend_indexer.append(pkey)
np_indexer.append(ekey)
elif isinstance(k, integer_types):
backend_indexer.append(k)
else: # slice: convert positive step slice for backend
bk_slice, np_slice = _decompose_slice(k, s)
backend_indexer.append(bk_slice)
np_indexer.append(np_slice)
return (OuterIndexer(tuple(backend_indexer)), OuterIndexer(tuple(np_indexer)))
if indexing_support == IndexingSupport.OUTER:
for k, s in zip(indexer_elems, shape, strict=False):
if isinstance(k, slice):
# slice: convert positive step slice for backend
bk_slice, np_slice = _decompose_slice(k, s)
backend_indexer.append(bk_slice)
np_indexer.append(np_slice)
elif isinstance(k, integer_types):
backend_indexer.append(k)
elif isinstance(k, np.ndarray) and (np.diff(k) >= 0).all():
backend_indexer.append(k)
np_indexer.append(slice(None))
else:
# Remove duplicates and sort them in the increasing order
oind, vind = np.unique(k, return_inverse=True)
backend_indexer.append(oind)
np_indexer.append(vind.reshape(*k.shape))
return (OuterIndexer(tuple(backend_indexer)), OuterIndexer(tuple(np_indexer)))
# basic indexer
assert indexing_support == IndexingSupport.BASIC
for k, s in zip(indexer_elems, shape, strict=False):
if isinstance(k, np.ndarray):
# np.ndarray key is converted to slice that covers the entire
# entries of this key.
backend_indexer.append(slice(np.min(k), np.max(k) + 1))
np_indexer.append(k - np.min(k))
elif isinstance(k, integer_types):
backend_indexer.append(k)
else: # slice: convert positive step slice for backend
bk_slice, np_slice = _decompose_slice(k, s)
backend_indexer.append(bk_slice)
np_indexer.append(np_slice)
return (BasicIndexer(tuple(backend_indexer)), OuterIndexer(tuple(np_indexer)))
| _decompose_outer_indexer |
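The decomposition can be mimicked with plain NumPy, without the private indexer classes: load only a bounding slice from the "backend", then apply a shifted outer index in memory. A sketch of the BASIC-support branch above:
import numpy as np
backend_array = np.arange(36).reshape(6, 6)  # stands in for an on-disk array
rows, cols = np.array([0, 2, 1]), np.array([2, 3, 2])  # outer (orthogonal) indexer
# Backend step: basic indexing with a bounding box, as in the BASIC branch.
loaded = backend_array[rows.min() : rows.max() + 1, cols.min() : cols.max() + 1]
# In-memory step: shifted outer indexing on the small loaded block.
result = loaded[np.ix_(rows - rows.min(), cols - cols.min())]
assert np.array_equal(result, backend_array[np.ix_(rows, cols)])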
xarray | 8 | xarray/core/indexing.py | def _decompose_vectorized_indexer(
indexer: VectorizedIndexer,
shape: _Shape,
indexing_support: IndexingSupport,
) -> tuple[ExplicitIndexer, ExplicitIndexer]:
"""
Decompose vectorized indexer to the successive two indexers, where the
first indexer will be used to index backend arrays, while the second one
is used to index loaded on-memory np.ndarray.
Parameters
----------
indexer : VectorizedIndexer
indexing_support : one of IndexerSupport entries
Returns
-------
backend_indexer: OuterIndexer or BasicIndexer
np_indexers: an ExplicitIndexer (VectorizedIndexer / BasicIndexer)
Notes
-----
This function is used to realize the vectorized indexing for the backend
arrays that only support basic or outer indexing.
As an example, let us consider indexing a few elements from a backend array
with a vectorized indexer ([0, 3, 1], [2, 3, 2]).
Even if the backend array only supports outer indexing, it is more
efficient to load a subslice of the array than loading the entire array,
>>> array = np.arange(36).reshape(6, 6)
>>> backend_indexer = OuterIndexer((np.array([0, 1, 3]), np.array([2, 3])))
>>> # load subslice of the array
... array = NumpyIndexingAdapter(array).oindex[backend_indexer]
>>> np_indexer = VectorizedIndexer((np.array([0, 2, 1]), np.array([0, 1, 0])))
>>> # vectorized indexing for on-memory np.ndarray.
... NumpyIndexingAdapter(array).vindex[np_indexer]
array([ 2, 21, 8])
"""
| /usr/src/app/target_test_cases/failed_tests__decompose_vectorized_indexer.txt | def _decompose_vectorized_indexer(
indexer: VectorizedIndexer,
shape: _Shape,
indexing_support: IndexingSupport,
) -> tuple[ExplicitIndexer, ExplicitIndexer]:
"""
Decompose vectorized indexer to the successive two indexers, where the
first indexer will be used to index backend arrays, while the second one
is used to index loaded on-memory np.ndarray.
Parameters
----------
indexer : VectorizedIndexer
indexing_support : one of IndexerSupport entries
Returns
-------
backend_indexer: OuterIndexer or BasicIndexer
np_indexers: an ExplicitIndexer (VectorizedIndexer / BasicIndexer)
Notes
-----
This function is used to realize the vectorized indexing for the backend
arrays that only support basic or outer indexing.
As an example, let us consider indexing a few elements from a backend array
with a vectorized indexer ([0, 3, 1], [2, 3, 2]).
Even if the backend array only supports outer indexing, it is more
efficient to load a subslice of the array than loading the entire array,
>>> array = np.arange(36).reshape(6, 6)
>>> backend_indexer = OuterIndexer((np.array([0, 1, 3]), np.array([2, 3])))
>>> # load subslice of the array
... array = NumpyIndexingAdapter(array).oindex[backend_indexer]
>>> np_indexer = VectorizedIndexer((np.array([0, 2, 1]), np.array([0, 1, 0])))
>>> # vectorized indexing for on-memory np.ndarray.
... NumpyIndexingAdapter(array).vindex[np_indexer]
array([ 2, 21, 8])
"""
assert isinstance(indexer, VectorizedIndexer)
if indexing_support is IndexingSupport.VECTORIZED:
return indexer, BasicIndexer(())
backend_indexer_elems = []
np_indexer_elems = []
# convert negative indices
indexer_elems = [
np.where(k < 0, k + s, k) if isinstance(k, np.ndarray) else k
for k, s in zip(indexer.tuple, shape, strict=True)
]
for k, s in zip(indexer_elems, shape, strict=True):
if isinstance(k, slice):
# If it is a slice, then we will slice it as-is
# (but make its step positive) in the backend,
# and then use all of it (slice(None)) for the in-memory portion.
bk_slice, np_slice = _decompose_slice(k, s)
backend_indexer_elems.append(bk_slice)
np_indexer_elems.append(np_slice)
else:
# If it is a (multidimensional) np.ndarray, just pickup the used
# keys without duplication and store them as a 1d-np.ndarray.
oind, vind = np.unique(k, return_inverse=True)
backend_indexer_elems.append(oind)
np_indexer_elems.append(vind.reshape(*k.shape))
backend_indexer = OuterIndexer(tuple(backend_indexer_elems))
np_indexer = VectorizedIndexer(tuple(np_indexer_elems))
if indexing_support is IndexingSupport.OUTER:
return backend_indexer, np_indexer
# If the backend does not support outer indexing,
# backend_indexer (OuterIndexer) is also decomposed.
backend_indexer1, np_indexer1 = _decompose_outer_indexer(
backend_indexer, shape, indexing_support
)
np_indexer = _combine_indexers(np_indexer1, shape, np_indexer)
return backend_indexer1, np_indexer
| _decompose_vectorized_indexer |
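The same two-step idea for pointwise (vectorized) indexing, again in plain NumPy and mirroring the docstring example: the backend only ever sees the de-duplicated rows and columns, and the pointwise lookup happens on the loaded block.
import numpy as np
backend_array = np.arange(36).reshape(6, 6)
rows, cols = np.array([0, 3, 1]), np.array([2, 3, 2])  # vectorized indexer
# Backend step: outer indexing with unique, sorted keys.
urows, row_inv = np.unique(rows, return_inverse=True)
ucols, col_inv = np.unique(cols, return_inverse=True)
loaded = backend_array[np.ix_(urows, ucols)]
# In-memory step: vectorized (pointwise) indexing via the inverse maps.
result = loaded[row_inv, col_inv]
assert np.array_equal(result, backend_array[rows, cols])  # [2, 21, 8]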
xarray | 9 | xarray/plot/utils.py | def _determine_cmap_params(
plot_data,
vmin=None,
vmax=None,
cmap=None,
center=None,
robust=False,
extend=None,
levels=None,
filled=True,
norm=None,
_is_facetgrid=False,
):
"""
Use some heuristics to set good defaults for colorbar and range.
Parameters
----------
plot_data : Numpy array
Doesn't handle xarray objects
Returns
-------
cmap_params : dict
Use depends on the type of the plotting function
"""
| /usr/src/app/target_test_cases/failed_tests__determine_cmap_params.txt | def _determine_cmap_params(
plot_data,
vmin=None,
vmax=None,
cmap=None,
center=None,
robust=False,
extend=None,
levels=None,
filled=True,
norm=None,
_is_facetgrid=False,
):
"""
Use some heuristics to set good defaults for colorbar and range.
Parameters
----------
plot_data : Numpy array
Doesn't handle xarray objects
Returns
-------
cmap_params : dict
Use depends on the type of the plotting function
"""
import matplotlib as mpl
if isinstance(levels, Iterable):
levels = sorted(levels)
calc_data = np.ravel(plot_data[np.isfinite(plot_data)])
# Handle all-NaN input data gracefully
if calc_data.size == 0:
# Arbitrary default for when all values are NaN
calc_data = np.array(0.0)
# Setting center=False prevents a divergent cmap
possibly_divergent = center is not False
# Set center to 0 so math below makes sense but remember its state
center_is_none = False
if center is None:
center = 0
center_is_none = True
# Setting both vmin and vmax prevents a divergent cmap
if (vmin is not None) and (vmax is not None):
possibly_divergent = False
# Setting vmin or vmax implies linspaced levels
user_minmax = (vmin is not None) or (vmax is not None)
# vlim might be computed below
vlim = None
# save state; needed later
vmin_was_none = vmin is None
vmax_was_none = vmax is None
if vmin is None:
if robust:
vmin = np.percentile(calc_data, ROBUST_PERCENTILE)
else:
vmin = calc_data.min()
elif possibly_divergent:
vlim = abs(vmin - center)
if vmax is None:
if robust:
vmax = np.percentile(calc_data, 100 - ROBUST_PERCENTILE)
else:
vmax = calc_data.max()
elif possibly_divergent:
vlim = abs(vmax - center)
if possibly_divergent:
levels_are_divergent = (
isinstance(levels, Iterable) and levels[0] * levels[-1] < 0
)
# kwargs not specific about divergent or not: infer defaults from data
divergent = (
((vmin < 0) and (vmax > 0)) or not center_is_none or levels_are_divergent
)
else:
divergent = False
# A divergent map should be symmetric around the center value
if divergent:
if vlim is None:
vlim = max(abs(vmin - center), abs(vmax - center))
vmin, vmax = -vlim, vlim
# Now add in the centering value and set the limits
vmin += center
vmax += center
# now check norm and harmonize with vmin, vmax
if norm is not None:
if norm.vmin is None:
norm.vmin = vmin
else:
if not vmin_was_none and vmin != norm.vmin:
raise ValueError("Cannot supply vmin and a norm with a different vmin.")
vmin = norm.vmin
if norm.vmax is None:
norm.vmax = vmax
else:
if not vmax_was_none and vmax != norm.vmax:
raise ValueError("Cannot supply vmax and a norm with a different vmax.")
vmax = norm.vmax
# if BoundaryNorm, then set levels
if isinstance(norm, mpl.colors.BoundaryNorm):
levels = norm.boundaries
# Choose default colormaps if not provided
if cmap is None:
if divergent:
cmap = OPTIONS["cmap_divergent"]
else:
cmap = OPTIONS["cmap_sequential"]
# Handle discrete levels
if levels is not None:
if is_scalar(levels):
if user_minmax:
levels = np.linspace(vmin, vmax, levels)
elif levels == 1:
levels = np.asarray([(vmin + vmax) / 2])
else:
# N in MaxNLocator refers to bins, not ticks
ticker = mpl.ticker.MaxNLocator(levels - 1)
levels = ticker.tick_values(vmin, vmax)
vmin, vmax = levels[0], levels[-1]
# GH3734
if vmin == vmax:
vmin, vmax = mpl.ticker.LinearLocator(2).tick_values(vmin, vmax)
if extend is None:
extend = _determine_extend(calc_data, vmin, vmax)
if (levels is not None) and (not isinstance(norm, mpl.colors.BoundaryNorm)):
cmap, newnorm = _build_discrete_cmap(cmap, levels, extend, filled)
norm = newnorm if norm is None else norm
# vmin & vmax needs to be None if norm is passed
# TODO: always return a norm with vmin and vmax
if norm is not None:
vmin = None
vmax = None
return dict(
vmin=vmin, vmax=vmax, cmap=cmap, extend=extend, levels=levels, norm=norm
)
| _determine_cmap_params |
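These heuristics are what a bare DataArray.plot() call ends up using. A NumPy-only sketch of the two key defaults, robust percentile clipping and symmetric limits around a center of 0; ROBUST_PERCENTILE is hard-coded below as an assumed stand-in for the module constant referenced above:
import numpy as np
data = np.random.default_rng(0).normal(size=1000)
data[:5] = 50.0  # a few outliers that would otherwise stretch the color range
ROBUST_PERCENTILE = 2.0  # assumed value of the constant used above
vmin = np.percentile(data, ROBUST_PERCENTILE)  # robust=True lower limit
vmax = np.percentile(data, 100 - ROBUST_PERCENTILE)  # robust=True upper limit
# Data crossing zero -> divergent colormap, symmetric about center=0.
vlim = max(abs(vmin), abs(vmax))
vmin, vmax = -vlim, vlim
print(vmin, vmax)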
xarray | 10 | xarray/core/resample_cftime.py | def _get_range_edges(
first: CFTimeDatetime,
last: CFTimeDatetime,
freq: BaseCFTimeOffset,
closed: SideOptions = "left",
origin: str | CFTimeDatetime = "start_day",
offset: datetime.timedelta | None = None,
):
"""Get the correct starting and ending datetimes for the resampled
CFTimeIndex range.
Parameters
----------
first : cftime.datetime
Uncorrected starting datetime object for resampled CFTimeIndex range.
Usually the min of the original CFTimeIndex.
last : cftime.datetime
Uncorrected ending datetime object for resampled CFTimeIndex range.
Usually the max of the original CFTimeIndex.
freq : xarray.coding.cftime_offsets.BaseCFTimeOffset
The offset object representing target conversion a.k.a. resampling
frequency. Contains information on offset type (e.g. Day or 'D') and
offset magnitude (e.g., n = 3).
closed : 'left' or 'right'
Which side of bin interval is closed. Defaults to 'left'.
origin : {'epoch', 'start', 'start_day', 'end', 'end_day'} or cftime.datetime, default 'start_day'
The datetime on which to adjust the grouping. The timezone of origin
must match the timezone of the index.
If a datetime is not used, these values are also supported:
- 'epoch': `origin` is 1970-01-01
- 'start': `origin` is the first value of the timeseries
- 'start_day': `origin` is the first day at midnight of the timeseries
- 'end': `origin` is the last value of the timeseries
- 'end_day': `origin` is the ceiling midnight of the last day
offset : datetime.timedelta, default is None
An offset timedelta added to the origin.
Returns
-------
first : cftime.datetime
Corrected starting datetime object for resampled CFTimeIndex range.
last : cftime.datetime
Corrected ending datetime object for resampled CFTimeIndex range.
"""
| /usr/src/app/target_test_cases/failed_tests__get_range_edges.txt | def _get_range_edges(
first: CFTimeDatetime,
last: CFTimeDatetime,
freq: BaseCFTimeOffset,
closed: SideOptions = "left",
origin: str | CFTimeDatetime = "start_day",
offset: datetime.timedelta | None = None,
):
"""Get the correct starting and ending datetimes for the resampled
CFTimeIndex range.
Parameters
----------
first : cftime.datetime
Uncorrected starting datetime object for resampled CFTimeIndex range.
Usually the min of the original CFTimeIndex.
last : cftime.datetime
Uncorrected ending datetime object for resampled CFTimeIndex range.
Usually the max of the original CFTimeIndex.
freq : xarray.coding.cftime_offsets.BaseCFTimeOffset
The offset object representing target conversion a.k.a. resampling
frequency. Contains information on offset type (e.g. Day or 'D') and
offset magnitude (e.g., n = 3).
closed : 'left' or 'right'
Which side of bin interval is closed. Defaults to 'left'.
origin : {'epoch', 'start', 'start_day', 'end', 'end_day'} or cftime.datetime, default 'start_day'
The datetime on which to adjust the grouping. The timezone of origin
must match the timezone of the index.
If a datetime is not used, these values are also supported:
- 'epoch': `origin` is 1970-01-01
- 'start': `origin` is the first value of the timeseries
- 'start_day': `origin` is the first day at midnight of the timeseries
- 'end': `origin` is the last value of the timeseries
- 'end_day': `origin` is the ceiling midnight of the last day
offset : datetime.timedelta, default is None
An offset timedelta added to the origin.
Returns
-------
first : cftime.datetime
Corrected starting datetime object for resampled CFTimeIndex range.
last : cftime.datetime
Corrected ending datetime object for resampled CFTimeIndex range.
"""
if isinstance(freq, Tick):
first, last = _adjust_dates_anchored(
first, last, freq, closed=closed, origin=origin, offset=offset
)
return first, last
else:
first = normalize_date(first)
last = normalize_date(last)
first = freq.rollback(first) if closed == "left" else first - freq
last = last + freq
return first, last
| _get_range_edges |
xarray | 11 | xarray/core/resample_cftime.py | def _get_time_bins(
index: CFTimeIndex,
freq: BaseCFTimeOffset,
closed: SideOptions,
label: SideOptions,
origin: str | CFTimeDatetime,
offset: datetime.timedelta | None,
):
"""Obtain the bins and their respective labels for resampling operations.
Parameters
----------
index : CFTimeIndex
Index object to be resampled (e.g., CFTimeIndex named 'time').
freq : xarray.coding.cftime_offsets.BaseCFTimeOffset
The offset object representing target conversion a.k.a. resampling
frequency (e.g., 'MS', '2D', 'H', or '3T' with
coding.cftime_offsets.to_offset() applied to it).
closed : 'left' or 'right'
Which side of bin interval is closed.
The default is 'left' for all frequency offsets except for 'M' and 'A',
which have a default of 'right'.
label : 'left' or 'right'
Which bin edge label to label bucket with.
The default is 'left' for all frequency offsets except for 'M' and 'A',
which have a default of 'right'.
origin : {'epoch', 'start', 'start_day', 'end', 'end_day'} or cftime.datetime, default 'start_day'
The datetime on which to adjust the grouping. The timezone of origin
must match the timezone of the index.
If a datetime is not used, these values are also supported:
- 'epoch': `origin` is 1970-01-01
- 'start': `origin` is the first value of the timeseries
- 'start_day': `origin` is the first day at midnight of the timeseries
- 'end': `origin` is the last value of the timeseries
- 'end_day': `origin` is the ceiling midnight of the last day
offset : datetime.timedelta, default is None
An offset timedelta added to the origin.
Returns
-------
datetime_bins : CFTimeIndex
Defines the edge of resampling bins by which original index values will
be grouped into.
labels : CFTimeIndex
Define what the user actually sees the bins labeled as.
"""
| /usr/src/app/target_test_cases/failed_tests__get_time_bins.txt | def _get_time_bins(
index: CFTimeIndex,
freq: BaseCFTimeOffset,
closed: SideOptions,
label: SideOptions,
origin: str | CFTimeDatetime,
offset: datetime.timedelta | None,
):
"""Obtain the bins and their respective labels for resampling operations.
Parameters
----------
index : CFTimeIndex
Index object to be resampled (e.g., CFTimeIndex named 'time').
freq : xarray.coding.cftime_offsets.BaseCFTimeOffset
The offset object representing target conversion a.k.a. resampling
frequency (e.g., 'MS', '2D', 'H', or '3T' with
coding.cftime_offsets.to_offset() applied to it).
closed : 'left' or 'right'
Which side of bin interval is closed.
The default is 'left' for all frequency offsets except for 'M' and 'A',
which have a default of 'right'.
label : 'left' or 'right'
Which bin edge label to label bucket with.
The default is 'left' for all frequency offsets except for 'M' and 'A',
which have a default of 'right'.
origin : {'epoch', 'start', 'start_day', 'end', 'end_day'} or cftime.datetime, default 'start_day'
The datetime on which to adjust the grouping. The timezone of origin
must match the timezone of the index.
If a datetime is not used, these values are also supported:
- 'epoch': `origin` is 1970-01-01
- 'start': `origin` is the first value of the timeseries
- 'start_day': `origin` is the first day at midnight of the timeseries
- 'end': `origin` is the last value of the timeseries
- 'end_day': `origin` is the ceiling midnight of the last day
offset : datetime.timedelta, default is None
An offset timedelta added to the origin.
Returns
-------
datetime_bins : CFTimeIndex
Defines the edge of resampling bins by which original index values will
be grouped into.
labels : CFTimeIndex
Define what the user actually sees the bins labeled as.
"""
if not isinstance(index, CFTimeIndex):
raise TypeError(
"index must be a CFTimeIndex, but got "
f"an instance of {type(index).__name__!r}"
)
if len(index) == 0:
datetime_bins = labels = CFTimeIndex(data=[], name=index.name)
return datetime_bins, labels
first, last = _get_range_edges(
index.min(), index.max(), freq, closed=closed, origin=origin, offset=offset
)
datetime_bins = labels = cftime_range(
freq=freq, start=first, end=last, name=index.name
)
datetime_bins, labels = _adjust_bin_edges(
datetime_bins, freq, closed, index, labels
)
labels = labels[1:] if label == "right" else labels[:-1]
# TODO: when CFTimeIndex supports missing values, if the reference index
# contains missing values, insert the appropriate NaN value at the
# beginning of the datetime_bins and labels indexes.
return datetime_bins, labels
| _get_time_bins |
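For a non-Tick frequency such as month start, the bin edges come from the rollback branch of _get_range_edges rather than the anchored path. A hedged sketch through the public resample API (assumes cftime is installed):
import numpy as np
import xarray as xr
times = xr.cftime_range("2000-01-15", periods=90, freq="D", calendar="360_day")
da = xr.DataArray(np.arange(90), coords={"time": times}, dims="time")
# Month-start bins; labels default to the left edge of each bin.
monthly = da.resample(time="MS").sum()
print(monthly["time"].values)  # 2000-01-01, 2000-02-01, 2000-03-01, 2000-04-01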
xarray | 12 | xarray/plot/utils.py | def _guess_coords_to_plot(
darray: DataArray,
coords_to_plot: MutableMapping[str, Hashable | None],
kwargs: dict,
default_guess: tuple[str, ...] = ("x",),
# TODO: Can this be normalized, plt.cbook.normalize_kwargs?
ignore_guess_kwargs: tuple[tuple[str, ...], ...] = ((),),
) -> MutableMapping[str, Hashable]:
"""
Guess what coords to plot if some of the values in coords_to_plot are None which
happens when the user has not defined all available ways of visualizing
the data.
Parameters
----------
darray : DataArray
The DataArray to check for available coords.
coords_to_plot : MutableMapping[str, Hashable]
Coords defined by the user to plot.
kwargs : dict
Extra kwargs that will be sent to matplotlib.
default_guess : Iterable[str], optional
Default values and order in which to guess coords if values in coords_to_plot
are missing, default: ("x",).
ignore_guess_kwargs : tuple[tuple[str, ...], ...]
Matplotlib arguments to ignore.
Examples
--------
>>> ds = xr.tutorial.scatter_example_dataset(seed=42)
>>> # Only guess x by default:
>>> xr.plot.utils._guess_coords_to_plot(
... ds.A,
... coords_to_plot={"x": None, "z": None, "hue": None, "size": None},
... kwargs={},
... )
{'x': 'x', 'z': None, 'hue': None, 'size': None}
>>> # Guess all plot dims with other default values:
>>> xr.plot.utils._guess_coords_to_plot(
... ds.A,
... coords_to_plot={"x": None, "z": None, "hue": None, "size": None},
... kwargs={},
... default_guess=("x", "hue", "size"),
... ignore_guess_kwargs=((), ("c", "color"), ("s",)),
... )
{'x': 'x', 'z': None, 'hue': 'y', 'size': 'z'}
>>> # Don't guess ´size´, since the matplotlib kwarg ´s´ has been defined:
>>> xr.plot.utils._guess_coords_to_plot(
... ds.A,
... coords_to_plot={"x": None, "z": None, "hue": None, "size": None},
... kwargs={"s": 5},
... default_guess=("x", "hue", "size"),
... ignore_guess_kwargs=((), ("c", "color"), ("s",)),
... )
{'x': 'x', 'z': None, 'hue': 'y', 'size': None}
>>> # Prioritize ´size´ over ´s´:
>>> xr.plot.utils._guess_coords_to_plot(
... ds.A,
... coords_to_plot={"x": None, "z": None, "hue": None, "size": "x"},
... kwargs={"s": 5},
... default_guess=("x", "hue", "size"),
... ignore_guess_kwargs=((), ("c", "color"), ("s",)),
... )
{'x': 'y', 'z': None, 'hue': 'z', 'size': 'x'}
"""
| /usr/src/app/target_test_cases/failed_tests__guess_coords_to_plot.txt | def _guess_coords_to_plot(
darray: DataArray,
coords_to_plot: MutableMapping[str, Hashable | None],
kwargs: dict,
default_guess: tuple[str, ...] = ("x",),
# TODO: Can this be normalized, plt.cbook.normalize_kwargs?
ignore_guess_kwargs: tuple[tuple[str, ...], ...] = ((),),
) -> MutableMapping[str, Hashable]:
"""
Guess what coords to plot if some of the values in coords_to_plot are None which
happens when the user has not defined all available ways of visualizing
the data.
Parameters
----------
darray : DataArray
The DataArray to check for available coords.
coords_to_plot : MutableMapping[str, Hashable]
Coords defined by the user to plot.
kwargs : dict
Extra kwargs that will be sent to matplotlib.
default_guess : Iterable[str], optional
Default values and order in which to guess coords if values in coords_to_plot
are missing, default: ("x",).
ignore_guess_kwargs : tuple[tuple[str, ...], ...]
Matplotlib arguments to ignore.
Examples
--------
>>> ds = xr.tutorial.scatter_example_dataset(seed=42)
>>> # Only guess x by default:
>>> xr.plot.utils._guess_coords_to_plot(
... ds.A,
... coords_to_plot={"x": None, "z": None, "hue": None, "size": None},
... kwargs={},
... )
{'x': 'x', 'z': None, 'hue': None, 'size': None}
>>> # Guess all plot dims with other default values:
>>> xr.plot.utils._guess_coords_to_plot(
... ds.A,
... coords_to_plot={"x": None, "z": None, "hue": None, "size": None},
... kwargs={},
... default_guess=("x", "hue", "size"),
... ignore_guess_kwargs=((), ("c", "color"), ("s",)),
... )
{'x': 'x', 'z': None, 'hue': 'y', 'size': 'z'}
>>> # Don't guess ´size´, since the matplotlib kwarg ´s´ has been defined:
>>> xr.plot.utils._guess_coords_to_plot(
... ds.A,
... coords_to_plot={"x": None, "z": None, "hue": None, "size": None},
... kwargs={"s": 5},
... default_guess=("x", "hue", "size"),
... ignore_guess_kwargs=((), ("c", "color"), ("s",)),
... )
{'x': 'x', 'z': None, 'hue': 'y', 'size': None}
>>> # Prioritize ´size´ over ´s´:
>>> xr.plot.utils._guess_coords_to_plot(
... ds.A,
... coords_to_plot={"x": None, "z": None, "hue": None, "size": "x"},
... kwargs={"s": 5},
... default_guess=("x", "hue", "size"),
... ignore_guess_kwargs=((), ("c", "color"), ("s",)),
... )
{'x': 'y', 'z': None, 'hue': 'z', 'size': 'x'}
"""
coords_to_plot_exist = {k: v for k, v in coords_to_plot.items() if v is not None}
available_coords = tuple(
k for k in darray.coords.keys() if k not in coords_to_plot_exist.values()
)
# If dims_plot[k] isn't defined then fill with one of the available dims, unless
# one of related mpl kwargs has been used. This should have similar behaviour as
# * plt.plot(x, y) -> Multiple lines with different colors if y is 2d.
# * plt.plot(x, y, color="red") -> Multiple red lines if y is 2d.
for k, dim, ign_kws in zip(
default_guess, available_coords, ignore_guess_kwargs, strict=False
):
if coords_to_plot.get(k, None) is None and all(
kwargs.get(ign_kw, None) is None for ign_kw in ign_kws
):
coords_to_plot[k] = dim
for k, dim in coords_to_plot.items():
_assert_valid_xy(darray, dim, k)
return coords_to_plot
| _guess_coords_to_plot |
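This guessing is what lets a bare scatter call pick hue and size automatically. A hedged sketch, assuming matplotlib is available; the exact guessed assignments follow the default_guess/ignore_guess_kwargs that the scatter wrapper passes in:
import numpy as np
import xarray as xr
da = xr.DataArray(
    np.random.default_rng(0).normal(size=(4, 3)),
    coords={"x": [0, 1, 2, 3], "y": [10, 20, 30]},
    dims=("x", "y"),
    name="A",
)
# x is given explicitly; hue is guessed from the remaining coordinate "y".
da.plot.scatter(x="x")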
xarray | 13 | xarray/coding/cftimeindex.py | def _parse_array_of_cftime_strings(strings, date_type):
"""Create a numpy array from an array of strings.
Used to generate dates from strings for use with interp. Assumes the
array is either 0-dimensional or 1-dimensional.
Parameters
----------
strings : array of strings
Strings to convert to dates
date_type : cftime.datetime type
Calendar type to use for dates
Returns
-------
np.array
"""
| /usr/src/app/target_test_cases/failed_tests__parse_array_of_cftime_strings.txt | def _parse_array_of_cftime_strings(strings, date_type):
"""Create a numpy array from an array of strings.
Used to generate dates from strings for use with interp. Assumes the
array is either 0-dimensional or 1-dimensional.
Parameters
----------
strings : array of strings
Strings to convert to dates
date_type : cftime.datetime type
Calendar type to use for dates
Returns
-------
np.array
"""
return np.array(
[_parse_iso8601_without_reso(date_type, s) for s in strings.ravel()]
).reshape(strings.shape)
| _parse_array_of_cftime_strings |
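Its main consumer is string-based interpolation on cftime coordinates. A hedged sketch (assumes cftime is installed; string support in interp for CFTimeIndex may vary by xarray version):
import xarray as xr
times = xr.cftime_range("2000-01-01", periods=4, freq="D", calendar="noleap")
da = xr.DataArray([0.0, 1.0, 2.0, 3.0], coords={"time": times}, dims="time")
# The string is parsed into a cftime.DatetimeNoLeap before interpolating.
print(da.interp(time="2000-01-02T12:00:00").item())  # 1.5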
xarray | 14 | xarray/coding/cftimeindex.py | def _partial_date_slice(self, resolution, parsed):
"""Adapted from
pandas.tseries.index.DatetimeIndex._partial_date_slice
Note that when using a CFTimeIndex, if a partial-date selection
returns a single element, it will never be converted to a scalar
coordinate; this is in slight contrast to the behavior when using
a DatetimeIndex, which sometimes will return a DataArray with a scalar
coordinate depending on the resolution of the datetimes used in
defining the index. For example:
>>> from cftime import DatetimeNoLeap
>>> da = xr.DataArray(
... [1, 2],
... coords=[[DatetimeNoLeap(2001, 1, 1), DatetimeNoLeap(2001, 2, 1)]],
... dims=["time"],
... )
>>> da.sel(time="2001-01-01")
<xarray.DataArray (time: 1)> Size: 8B
array([1])
Coordinates:
* time (time) object 8B 2001-01-01 00:00:00
>>> da = xr.DataArray(
... [1, 2],
... coords=[[pd.Timestamp(2001, 1, 1), pd.Timestamp(2001, 2, 1)]],
... dims=["time"],
... )
>>> da.sel(time="2001-01-01")
<xarray.DataArray ()> Size: 8B
array(1)
Coordinates:
time datetime64[ns] 8B 2001-01-01
>>> da = xr.DataArray(
... [1, 2],
... coords=[[pd.Timestamp(2001, 1, 1, 1), pd.Timestamp(2001, 2, 1)]],
... dims=["time"],
... )
>>> da.sel(time="2001-01-01")
<xarray.DataArray (time: 1)> Size: 8B
array([1])
Coordinates:
* time (time) datetime64[ns] 8B 2001-01-01T01:00:00
"""
| /usr/src/app/target_test_cases/failed_tests__partial_date_slice.txt | def _partial_date_slice(self, resolution, parsed):
"""Adapted from
pandas.tseries.index.DatetimeIndex._partial_date_slice
Note that when using a CFTimeIndex, if a partial-date selection
returns a single element, it will never be converted to a scalar
coordinate; this is in slight contrast to the behavior when using
a DatetimeIndex, which sometimes will return a DataArray with a scalar
coordinate depending on the resolution of the datetimes used in
defining the index. For example:
>>> from cftime import DatetimeNoLeap
>>> da = xr.DataArray(
... [1, 2],
... coords=[[DatetimeNoLeap(2001, 1, 1), DatetimeNoLeap(2001, 2, 1)]],
... dims=["time"],
... )
>>> da.sel(time="2001-01-01")
<xarray.DataArray (time: 1)> Size: 8B
array([1])
Coordinates:
* time (time) object 8B 2001-01-01 00:00:00
>>> da = xr.DataArray(
... [1, 2],
... coords=[[pd.Timestamp(2001, 1, 1), pd.Timestamp(2001, 2, 1)]],
... dims=["time"],
... )
>>> da.sel(time="2001-01-01")
<xarray.DataArray ()> Size: 8B
array(1)
Coordinates:
time datetime64[ns] 8B 2001-01-01
>>> da = xr.DataArray(
... [1, 2],
... coords=[[pd.Timestamp(2001, 1, 1, 1), pd.Timestamp(2001, 2, 1)]],
... dims=["time"],
... )
>>> da.sel(time="2001-01-01")
<xarray.DataArray (time: 1)> Size: 8B
array([1])
Coordinates:
* time (time) datetime64[ns] 8B 2001-01-01T01:00:00
"""
start, end = _parsed_string_to_bounds(self.date_type, resolution, parsed)
times = self._data
if self.is_monotonic_increasing:
if len(times) and (
(start < times[0] and end < times[0])
or (start > times[-1] and end > times[-1])
):
# we are out of range
raise KeyError
# a monotonic (sorted) series can be sliced
left = times.searchsorted(start, side="left")
right = times.searchsorted(end, side="right")
return slice(left, right)
lhs_mask = times >= start
rhs_mask = times <= end
return np.flatnonzero(lhs_mask & rhs_mask)
| _partial_date_slice |
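Partial-string selection backed by this method also works with slices, where each endpoint is expanded to the bounds of its resolution. A small sketch (assumes cftime is installed):
import numpy as np
import xarray as xr
times = xr.cftime_range("2001-01-01", periods=120, freq="D", calendar="noleap")
da = xr.DataArray(np.arange(120), coords={"time": times}, dims="time")
print(da.sel(time="2001-02").sizes)  # all of February: 28 days
print(da.sel(time=slice("2001-01-20", "2001-02")).sizes)  # Jan 20 through Feb 28: 40 days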
xarray | 15 | xarray/plot/dataarray_plot.py | def _prepare_plot1d_data(
darray: T_DataArray,
coords_to_plot: MutableMapping[str, Hashable],
plotfunc_name: str | None = None,
_is_facetgrid: bool = False,
) -> dict[str, T_DataArray]:
"""
Prepare data for usage with plt.scatter.
Parameters
----------
darray : T_DataArray
Base DataArray.
coords_to_plot : MutableMapping[str, Hashable]
Coords that will be plotted.
plotfunc_name : str | None
Name of the plotting function that will be used.
Returns
-------
plts : dict[str, T_DataArray]
Dict of DataArrays that will be sent to matplotlib.
Examples
--------
>>> # Make sure int coords are plotted:
>>> a = xr.DataArray(
... data=[1, 2],
... coords={1: ("x", [0, 1], {"units": "s"})},
... dims=("x",),
... name="a",
... )
>>> plts = xr.plot.dataarray_plot._prepare_plot1d_data(
... a, coords_to_plot={"x": 1, "z": None, "hue": None, "size": None}
... )
>>> # Check which coords to plot:
>>> print({k: v.name for k, v in plts.items()})
{'y': 'a', 'x': 1}
"""
| /usr/src/app/target_test_cases/failed_tests__prepare_plot1d_data.txt | def _prepare_plot1d_data(
darray: T_DataArray,
coords_to_plot: MutableMapping[str, Hashable],
plotfunc_name: str | None = None,
_is_facetgrid: bool = False,
) -> dict[str, T_DataArray]:
"""
Prepare data for usage with plt.scatter.
Parameters
----------
darray : T_DataArray
Base DataArray.
coords_to_plot : MutableMapping[str, Hashable]
Coords that will be plotted.
plotfunc_name : str | None
Name of the plotting function that will be used.
Returns
-------
plts : dict[str, T_DataArray]
Dict of DataArrays that will be sent to matplotlib.
Examples
--------
>>> # Make sure int coords are plotted:
>>> a = xr.DataArray(
... data=[1, 2],
... coords={1: ("x", [0, 1], {"units": "s"})},
... dims=("x",),
... name="a",
... )
>>> plts = xr.plot.dataarray_plot._prepare_plot1d_data(
... a, coords_to_plot={"x": 1, "z": None, "hue": None, "size": None}
... )
>>> # Check which coords to plot:
>>> print({k: v.name for k, v in plts.items()})
{'y': 'a', 'x': 1}
"""
# If there are more than 1 dimension in the array than stack all the
# dimensions so the plotter can plot anything:
if darray.ndim > 1:
# When stacking dims the lines will continue connecting. For floats
# this can be solved by adding a nan element in between the flattening
# points:
dims_T = []
if np.issubdtype(darray.dtype, np.floating):
for v in ["z", "x"]:
dim = coords_to_plot.get(v, None)
if (dim is not None) and (dim in darray.dims):
darray_nan = np.nan * darray.isel({dim: -1})
darray = concat([darray, darray_nan], dim=dim)
dims_T.append(coords_to_plot[v])
# Lines should never connect to the same coordinate when stacked,
# transpose to avoid this as much as possible:
darray = darray.transpose(..., *dims_T)
# Array is now ready to be stacked:
darray = darray.stack(_stacked_dim=darray.dims)
# Broadcast together all the chosen variables:
plts = dict(y=darray)
plts.update(
{k: darray.coords[v] for k, v in coords_to_plot.items() if v is not None}
)
plts = dict(zip(plts.keys(), broadcast(*(plts.values())), strict=True))
return plts
| _prepare_plot1d_data |
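The NaN-padding trick used above, so that stacked lines do not connect across the flattened dimension, can be reproduced with plain NumPy:
import numpy as np
y = np.arange(12, dtype=float).reshape(3, 4)  # three lines of four points each
# Append a NaN column before flattening; matplotlib breaks a line at NaNs,
# so the three rows are not joined end to end when plotted as one array.
y_padded = np.concatenate([y, np.full((3, 1), np.nan)], axis=1).ravel()
print(y_padded)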
xarray | 16 | xarray/core/formatting_html.py | def _wrap_datatree_repr(r: str, end: bool = False) -> str:
"""
Wrap HTML representation with a tee to the left of it.
Enclosing HTML tag is a <div> with :code:`display: inline-grid` style.
Turns:
[ title ]
| details |
|_____________|
into (A):
|─ [ title ]
| | details |
| |_____________|
or (B):
└─ [ title ]
| details |
|_____________|
Parameters
----------
r: str
HTML representation to wrap.
end: bool
Specify if the line on the left should continue or end.
Default is False.
Returns
-------
str
Wrapped HTML representation.
Tee color is set to the variable :code:`--xr-border-color`.
"""
| /usr/src/app/target_test_cases/failed_tests__wrap_datatree_repr.txt | def _wrap_datatree_repr(r: str, end: bool = False) -> str:
"""
Wrap HTML representation with a tee to the left of it.
Enclosing HTML tag is a <div> with :code:`display: inline-grid` style.
Turns:
[ title ]
| details |
|_____________|
into (A):
|─ [ title ]
| | details |
| |_____________|
or (B):
└─ [ title ]
| details |
|_____________|
Parameters
----------
r: str
HTML representation to wrap.
end: bool
Specify if the line on the left should continue or end.
Default is False.
Returns
-------
str
Wrapped HTML representation.
Tee color is set to the variable :code:`--xr-border-color`.
"""
# height of line
end = bool(end)
height = "100%" if end is False else "1.2em"
return "".join(
[
"<div style='display: inline-grid; grid-template-columns: 0px 20px auto; width: 100%;'>",
"<div style='",
"grid-column-start: 1;",
"border-right: 0.2em solid;",
"border-color: var(--xr-border-color);",
f"height: {height};",
"width: 0px;",
"'>",
"</div>",
"<div style='",
"grid-column-start: 2;",
"grid-row-start: 1;",
"height: 1em;",
"width: 20px;",
"border-bottom: 0.2em solid;",
"border-color: var(--xr-border-color);",
"'>",
"</div>",
"<div style='",
"grid-column-start: 3;",
"'>",
r,
"</div>",
"</div>",
]
)
| _wrap_datatree_repr |
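Because this is a private helper, the sketch below imports it from its internal module purely for illustration; the import path is taken from the row above and may change between xarray versions.
from xarray.core.formatting_html import _wrap_datatree_repr
# end=False keeps the vertical tee running (case A); end=True terminates it (case B).
html_a = _wrap_datatree_repr("<b>child node</b>", end=False)
html_b = _wrap_datatree_repr("<b>last child</b>", end=True)
print("height: 100%" in html_a, "height: 1.2em" in html_b)  # True True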
xarray | 17 | xarray/core/_aggregations.py | def any(
self,
dim: Dims = None,
*,
keep_attrs: bool | None = None,
**kwargs: Any,
) -> Self:
"""
Reduce this Dataset's data by applying ``any`` along some dimension(s).
Parameters
----------
dim : str, Iterable of Hashable, "..." or None, default: None
Name of dimension[s] along which to apply ``any``. For e.g. ``dim="x"``
or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions.
keep_attrs : bool or None, optional
If True, ``attrs`` will be copied from the original
object to the new one. If False, the new object will be
returned without attributes.
**kwargs : Any
Additional keyword arguments passed on to the appropriate array
function for calculating ``any`` on this object's data.
These could include dask-specific kwargs like ``split_every``.
Returns
-------
reduced : Dataset
New Dataset with ``any`` applied to its data and the
indicated dimension(s) removed
See Also
--------
numpy.any
dask.array.any
DataArray.any
:ref:`agg`
User guide on reduction or aggregation operations.
Examples
--------
>>> da = xr.DataArray(
... np.array([True, True, True, True, True, False], dtype=bool),
... dims="time",
... coords=dict(
... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)),
... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
... ),
... )
>>> ds = xr.Dataset(dict(da=da))
>>> ds
<xarray.Dataset> Size: 78B
Dimensions: (time: 6)
Coordinates:
* time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30
labels (time) <U1 24B 'a' 'b' 'c' 'c' 'b' 'a'
Data variables:
da (time) bool 6B True True True True True False
>>> ds.any()
<xarray.Dataset> Size: 1B
Dimensions: ()
Data variables:
da bool 1B True
"""
| /usr/src/app/target_test_cases/failed_tests_any.txt | def any(
self,
dim: Dims = None,
*,
keep_attrs: bool | None = None,
**kwargs: Any,
) -> Self:
"""
Reduce this Dataset's data by applying ``any`` along some dimension(s).
Parameters
----------
dim : str, Iterable of Hashable, "..." or None, default: None
Name of dimension[s] along which to apply ``any``. For e.g. ``dim="x"``
or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions.
keep_attrs : bool or None, optional
If True, ``attrs`` will be copied from the original
object to the new one. If False, the new object will be
returned without attributes.
**kwargs : Any
Additional keyword arguments passed on to the appropriate array
function for calculating ``any`` on this object's data.
These could include dask-specific kwargs like ``split_every``.
Returns
-------
reduced : Dataset
New Dataset with ``any`` applied to its data and the
indicated dimension(s) removed
See Also
--------
numpy.any
dask.array.any
DataArray.any
:ref:`agg`
User guide on reduction or aggregation operations.
Examples
--------
>>> da = xr.DataArray(
... np.array([True, True, True, True, True, False], dtype=bool),
... dims="time",
... coords=dict(
... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)),
... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
... ),
... )
>>> ds = xr.Dataset(dict(da=da))
>>> ds
<xarray.Dataset> Size: 78B
Dimensions: (time: 6)
Coordinates:
* time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30
labels (time) <U1 24B 'a' 'b' 'c' 'c' 'b' 'a'
Data variables:
da (time) bool 6B True True True True True False
>>> ds.any()
<xarray.Dataset> Size: 1B
Dimensions: ()
Data variables:
da bool 1B True
"""
return self.reduce(
duck_array_ops.array_any,
dim=dim,
numeric_only=False,
keep_attrs=keep_attrs,
**kwargs,
)
| any |
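A short usage note for the dim argument, reusing the docstring's own example data (including its pandas "ME" frequency alias):
import numpy as np
import pandas as pd
import xarray as xr
da = xr.DataArray(
    np.array([True, True, True, True, True, False], dtype=bool),
    dims="time",
    coords=dict(
        time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)),
        labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
    ),
)
ds = xr.Dataset(dict(da=da))
# Reduce over an explicit dimension instead of all dimensions.
print(ds.any(dim="time")["da"].item())  # True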
xarray | 18 | xarray/core/computation.py | def apply_ufunc(
func: Callable,
*args: Any,
input_core_dims: Sequence[Sequence] | None = None,
output_core_dims: Sequence[Sequence] | None = ((),),
exclude_dims: Set = frozenset(),
vectorize: bool = False,
join: JoinOptions = "exact",
dataset_join: str = "exact",
dataset_fill_value: object = _NO_FILL_VALUE,
keep_attrs: bool | str | None = None,
kwargs: Mapping | None = None,
dask: Literal["forbidden", "allowed", "parallelized"] = "forbidden",
output_dtypes: Sequence | None = None,
output_sizes: Mapping[Any, int] | None = None,
meta: Any = None,
dask_gufunc_kwargs: dict[str, Any] | None = None,
on_missing_core_dim: MissingCoreDimOptions = "raise",
) -> Any:
"""Apply a vectorized function for unlabeled arrays on xarray objects.
The function will be mapped over the data variable(s) of the input
arguments using xarray's standard rules for labeled computation, including
alignment, broadcasting, looping over GroupBy/Dataset variables, and
merging of coordinates.
Parameters
----------
func : callable
Function to call like ``func(*args, **kwargs)`` on unlabeled arrays
(``.data``) that returns an array or tuple of arrays. If multiple
arguments with non-matching dimensions are supplied, this function is
expected to vectorize (broadcast) over axes of positional arguments in
the style of NumPy universal functions [1]_ (if this is not the case,
set ``vectorize=True``). If this function returns multiple outputs, you
must set ``output_core_dims`` as well.
*args : Dataset, DataArray, DataArrayGroupBy, DatasetGroupBy, Variable, \
numpy.ndarray, dask.array.Array or scalar
Mix of labeled and/or unlabeled arrays to which to apply the function.
input_core_dims : sequence of sequence, optional
List of the same length as ``args`` giving the list of core dimensions
on each input argument that should not be broadcast. By default, we
assume there are no core dimensions on any input arguments.
For example, ``input_core_dims=[[], ['time']]`` indicates that all
dimensions on the first argument and all dimensions other than 'time'
on the second argument should be broadcast.
Core dimensions are automatically moved to the last axes of input
variables before applying ``func``, which facilitates using NumPy style
generalized ufuncs [2]_.
output_core_dims : list of tuple, optional
List of the same length as the number of output arguments from
``func``, giving the list of core dimensions on each output that were
not broadcast on the inputs. By default, we assume that ``func``
outputs exactly one array, with axes corresponding to each broadcast
dimension.
Core dimensions are assumed to appear as the last dimensions of each
output in the provided order.
exclude_dims : set, optional
Core dimensions on the inputs to exclude from alignment and
broadcasting entirely. Any input coordinates along these dimensions
will be dropped. Each excluded dimension must also appear in
``input_core_dims`` for at least one argument. Only dimensions listed
here are allowed to change size between input and output objects.
vectorize : bool, optional
If True, then assume ``func`` only takes arrays defined over core
dimensions as input and vectorize it automatically with
:py:func:`numpy.vectorize`. This option exists for convenience, but is
almost always slower than supplying a pre-vectorized function.
join : {"outer", "inner", "left", "right", "exact"}, default: "exact"
Method for joining the indexes of the passed objects along each
dimension, and the variables of Dataset objects with mismatched
data variables:
- 'outer': use the union of object indexes
- 'inner': use the intersection of object indexes
- 'left': use indexes from the first object with each dimension
- 'right': use indexes from the last object with each dimension
- 'exact': raise `ValueError` instead of aligning when indexes to be
aligned are not equal
dataset_join : {"outer", "inner", "left", "right", "exact"}, default: "exact"
Method for joining variables of Dataset objects with mismatched
data variables.
- 'outer': take variables from both Dataset objects
- 'inner': take only overlapped variables
- 'left': take only variables from the first object
- 'right': take only variables from the last object
- 'exact': data variables on all Dataset objects must match exactly
dataset_fill_value : optional
Value used in place of missing variables on Dataset inputs when the
datasets do not share the exact same ``data_vars``. Required if
``dataset_join not in {'inner', 'exact'}``, otherwise ignored.
keep_attrs : {"drop", "identical", "no_conflicts", "drop_conflicts", "override"} or bool, optional
- 'drop' or False: empty attrs on returned xarray object.
- 'identical': all attrs must be the same on every object.
- 'no_conflicts': attrs from all objects are combined, any that have the same name must also have the same value.
- 'drop_conflicts': attrs from all objects are combined, any that have the same name but different values are dropped.
- 'override' or True: skip comparing and copy attrs from the first object to the result.
kwargs : dict, optional
Optional keyword arguments passed directly on to call ``func``.
dask : {"forbidden", "allowed", "parallelized"}, default: "forbidden"
How to handle applying to objects containing lazy data in the form of
dask arrays:
- 'forbidden' (default): raise an error if a dask array is encountered.
- 'allowed': pass dask arrays directly on to ``func``. Prefer this option if
``func`` natively supports dask arrays.
- 'parallelized': automatically parallelize ``func`` if any of the
inputs are a dask array by using :py:func:`dask.array.apply_gufunc`. Multiple output
arguments are supported. Only use this option if ``func`` does not natively
support dask arrays (e.g. converts them to numpy arrays).
dask_gufunc_kwargs : dict, optional
Optional keyword arguments passed to :py:func:`dask.array.apply_gufunc` if
dask='parallelized'. Possible keywords are ``output_sizes``, ``allow_rechunk``
and ``meta``.
output_dtypes : list of dtype, optional
Optional list of output dtypes. Only used if ``dask='parallelized'`` or
``vectorize=True``.
output_sizes : dict, optional
Optional mapping from dimension names to sizes for outputs. Only used
if dask='parallelized' and new dimensions (not found on inputs) appear
on outputs. ``output_sizes`` should be given in the ``dask_gufunc_kwargs``
parameter. It will be removed as a direct parameter in a future version.
meta : optional
Size-0 object representing the type of array wrapped by dask array. Passed on to
:py:func:`dask.array.apply_gufunc`. ``meta`` should be given in the
``dask_gufunc_kwargs`` parameter. It will be removed as a direct parameter in
a future version.
on_missing_core_dim : {"raise", "copy", "drop"}, default: "raise"
How to handle missing core dimensions on input variables.
Returns
-------
Single value or tuple of Dataset, DataArray, Variable, dask.array.Array or
numpy.ndarray, the first type on that list to appear on an input.
Notes
-----
This function is designed for the more common case where ``func`` can work on numpy
arrays. If ``func`` needs to manipulate a whole xarray object subset to each block
it is possible to use :py:func:`xarray.map_blocks`.
Note that due to the overhead :py:func:`xarray.map_blocks` is considerably slower than ``apply_ufunc``.
Examples
--------
Calculate the vector magnitude of two arguments:
>>> def magnitude(a, b):
... func = lambda x, y: np.sqrt(x**2 + y**2)
... return xr.apply_ufunc(func, a, b)
...
You can now apply ``magnitude()`` to :py:class:`DataArray` and :py:class:`Dataset`
objects, with automatically preserved dimensions and coordinates, e.g.,
>>> array = xr.DataArray([1, 2, 3], coords=[("x", [0.1, 0.2, 0.3])])
>>> magnitude(array, -array)
<xarray.DataArray (x: 3)> Size: 24B
array([1.41421356, 2.82842712, 4.24264069])
Coordinates:
* x (x) float64 24B 0.1 0.2 0.3
Plain scalars, numpy arrays and a mix of these with xarray objects is also
supported:
>>> magnitude(3, 4)
np.float64(5.0)
>>> magnitude(3, np.array([0, 4]))
array([3., 5.])
>>> magnitude(array, 0)
<xarray.DataArray (x: 3)> Size: 24B
array([1., 2., 3.])
Coordinates:
* x (x) float64 24B 0.1 0.2 0.3
Other examples of how you could use ``apply_ufunc`` to write functions to
(very nearly) replicate existing xarray functionality:
Compute the mean (``.mean``) over one dimension:
>>> def mean(obj, dim):
... # note: apply always moves core dimensions to the end
... return apply_ufunc(
... np.mean, obj, input_core_dims=[[dim]], kwargs={"axis": -1}
... )
...
Inner product over a specific dimension (like :py:func:`dot`):
>>> def _inner(x, y):
... result = np.matmul(x[..., np.newaxis, :], y[..., :, np.newaxis])
... return result[..., 0, 0]
...
>>> def inner_product(a, b, dim):
... return apply_ufunc(_inner, a, b, input_core_dims=[[dim], [dim]])
...
Stack objects along a new dimension (like :py:func:`concat`):
>>> def stack(objects, dim, new_coord):
... # note: this version does not stack coordinates
... func = lambda *x: np.stack(x, axis=-1)
... result = apply_ufunc(
... func,
... *objects,
... output_core_dims=[[dim]],
... join="outer",
... dataset_fill_value=np.nan
... )
... result[dim] = new_coord
... return result
...
If your function is not vectorized but can be applied only to core
dimensions, you can use ``vectorize=True`` to turn into a vectorized
function. This wraps :py:func:`numpy.vectorize`, so the operation isn't
terribly fast. Here we'll use it to calculate the distance between
empirical samples from two probability distributions, using a scipy
function that needs to be applied to vectors:
>>> import scipy.stats
>>> def earth_mover_distance(first_samples, second_samples, dim="ensemble"):
... return apply_ufunc(
... scipy.stats.wasserstein_distance,
... first_samples,
... second_samples,
... input_core_dims=[[dim], [dim]],
... vectorize=True,
... )
...
Most of NumPy's builtin functions already broadcast their inputs
appropriately for use in ``apply_ufunc``. You may find helper functions such as
:py:func:`numpy.broadcast_arrays` helpful in writing your function. ``apply_ufunc`` also
works well with :py:func:`numba.vectorize` and :py:func:`numba.guvectorize`.
See Also
--------
numpy.broadcast_arrays
numba.vectorize
numba.guvectorize
dask.array.apply_gufunc
xarray.map_blocks
Notes
-----
:ref:`dask.automatic-parallelization`
User guide describing :py:func:`apply_ufunc` and :py:func:`map_blocks`.
:doc:`xarray-tutorial:advanced/apply_ufunc/apply_ufunc`
Advanced Tutorial on applying numpy function using :py:func:`apply_ufunc`
References
----------
.. [1] https://numpy.org/doc/stable/reference/ufuncs.html
.. [2] https://numpy.org/doc/stable/reference/c-api/generalized-ufuncs.html
"""
| /usr/src/app/target_test_cases/failed_tests_apply_ufunc.txt | def apply_ufunc(
func: Callable,
*args: Any,
input_core_dims: Sequence[Sequence] | None = None,
output_core_dims: Sequence[Sequence] | None = ((),),
exclude_dims: Set = frozenset(),
vectorize: bool = False,
join: JoinOptions = "exact",
dataset_join: str = "exact",
dataset_fill_value: object = _NO_FILL_VALUE,
keep_attrs: bool | str | None = None,
kwargs: Mapping | None = None,
dask: Literal["forbidden", "allowed", "parallelized"] = "forbidden",
output_dtypes: Sequence | None = None,
output_sizes: Mapping[Any, int] | None = None,
meta: Any = None,
dask_gufunc_kwargs: dict[str, Any] | None = None,
on_missing_core_dim: MissingCoreDimOptions = "raise",
) -> Any:
"""Apply a vectorized function for unlabeled arrays on xarray objects.
The function will be mapped over the data variable(s) of the input
arguments using xarray's standard rules for labeled computation, including
alignment, broadcasting, looping over GroupBy/Dataset variables, and
merging of coordinates.
Parameters
----------
func : callable
Function to call like ``func(*args, **kwargs)`` on unlabeled arrays
(``.data``) that returns an array or tuple of arrays. If multiple
arguments with non-matching dimensions are supplied, this function is
expected to vectorize (broadcast) over axes of positional arguments in
the style of NumPy universal functions [1]_ (if this is not the case,
set ``vectorize=True``). If this function returns multiple outputs, you
must set ``output_core_dims`` as well.
*args : Dataset, DataArray, DataArrayGroupBy, DatasetGroupBy, Variable, \
numpy.ndarray, dask.array.Array or scalar
Mix of labeled and/or unlabeled arrays to which to apply the function.
input_core_dims : sequence of sequence, optional
List of the same length as ``args`` giving the list of core dimensions
on each input argument that should not be broadcast. By default, we
assume there are no core dimensions on any input arguments.
For example, ``input_core_dims=[[], ['time']]`` indicates that all
dimensions on the first argument and all dimensions other than 'time'
on the second argument should be broadcast.
Core dimensions are automatically moved to the last axes of input
variables before applying ``func``, which facilitates using NumPy style
generalized ufuncs [2]_.
output_core_dims : list of tuple, optional
List of the same length as the number of output arguments from
``func``, giving the list of core dimensions on each output that were
not broadcast on the inputs. By default, we assume that ``func``
outputs exactly one array, with axes corresponding to each broadcast
dimension.
Core dimensions are assumed to appear as the last dimensions of each
output in the provided order.
exclude_dims : set, optional
Core dimensions on the inputs to exclude from alignment and
broadcasting entirely. Any input coordinates along these dimensions
will be dropped. Each excluded dimension must also appear in
``input_core_dims`` for at least one argument. Only dimensions listed
here are allowed to change size between input and output objects.
vectorize : bool, optional
If True, then assume ``func`` only takes arrays defined over core
dimensions as input and vectorize it automatically with
:py:func:`numpy.vectorize`. This option exists for convenience, but is
almost always slower than supplying a pre-vectorized function.
join : {"outer", "inner", "left", "right", "exact"}, default: "exact"
Method for joining the indexes of the passed objects along each
dimension, and the variables of Dataset objects with mismatched
data variables:
- 'outer': use the union of object indexes
- 'inner': use the intersection of object indexes
- 'left': use indexes from the first object with each dimension
- 'right': use indexes from the last object with each dimension
- 'exact': raise `ValueError` instead of aligning when indexes to be
aligned are not equal
dataset_join : {"outer", "inner", "left", "right", "exact"}, default: "exact"
Method for joining variables of Dataset objects with mismatched
data variables.
- 'outer': take variables from both Dataset objects
- 'inner': take only overlapped variables
- 'left': take only variables from the first object
- 'right': take only variables from the last object
- 'exact': data variables on all Dataset objects must match exactly
dataset_fill_value : optional
Value used in place of missing variables on Dataset inputs when the
datasets do not share the exact same ``data_vars``. Required if
``dataset_join not in {'inner', 'exact'}``, otherwise ignored.
keep_attrs : {"drop", "identical", "no_conflicts", "drop_conflicts", "override"} or bool, optional
- 'drop' or False: empty attrs on returned xarray object.
- 'identical': all attrs must be the same on every object.
- 'no_conflicts': attrs from all objects are combined, any that have the same name must also have the same value.
- 'drop_conflicts': attrs from all objects are combined, any that have the same name but different values are dropped.
- 'override' or True: skip comparing and copy attrs from the first object to the result.
kwargs : dict, optional
Optional keyword arguments passed directly on to call ``func``.
dask : {"forbidden", "allowed", "parallelized"}, default: "forbidden"
How to handle applying to objects containing lazy data in the form of
dask arrays:
- 'forbidden' (default): raise an error if a dask array is encountered.
- 'allowed': pass dask arrays directly on to ``func``. Prefer this option if
``func`` natively supports dask arrays.
- 'parallelized': automatically parallelize ``func`` if any of the
inputs are a dask array by using :py:func:`dask.array.apply_gufunc`. Multiple output
arguments are supported. Only use this option if ``func`` does not natively
support dask arrays (e.g. converts them to numpy arrays).
dask_gufunc_kwargs : dict, optional
Optional keyword arguments passed to :py:func:`dask.array.apply_gufunc` if
dask='parallelized'. Possible keywords are ``output_sizes``, ``allow_rechunk``
and ``meta``.
output_dtypes : list of dtype, optional
Optional list of output dtypes. Only used if ``dask='parallelized'`` or
``vectorize=True``.
output_sizes : dict, optional
Optional mapping from dimension names to sizes for outputs. Only used
if dask='parallelized' and new dimensions (not found on inputs) appear
on outputs. ``output_sizes`` should be given in the ``dask_gufunc_kwargs``
parameter. It will be removed as a direct parameter in a future version.
meta : optional
Size-0 object representing the type of array wrapped by dask array. Passed on to
:py:func:`dask.array.apply_gufunc`. ``meta`` should be given in the
``dask_gufunc_kwargs`` parameter. It will be removed as a direct parameter in
a future version.
on_missing_core_dim : {"raise", "copy", "drop"}, default: "raise"
How to handle missing core dimensions on input variables.
Returns
-------
Single value or tuple of Dataset, DataArray, Variable, dask.array.Array or
numpy.ndarray, the first type on that list to appear on an input.
Notes
-----
This function is designed for the more common case where ``func`` can work on numpy
arrays. If ``func`` needs to manipulate a whole xarray object subset to each block
it is possible to use :py:func:`xarray.map_blocks`.
Note that due to the overhead :py:func:`xarray.map_blocks` is considerably slower than ``apply_ufunc``.
Examples
--------
Calculate the vector magnitude of two arguments:
>>> def magnitude(a, b):
... func = lambda x, y: np.sqrt(x**2 + y**2)
... return xr.apply_ufunc(func, a, b)
...
You can now apply ``magnitude()`` to :py:class:`DataArray` and :py:class:`Dataset`
objects, with automatically preserved dimensions and coordinates, e.g.,
>>> array = xr.DataArray([1, 2, 3], coords=[("x", [0.1, 0.2, 0.3])])
>>> magnitude(array, -array)
<xarray.DataArray (x: 3)> Size: 24B
array([1.41421356, 2.82842712, 4.24264069])
Coordinates:
* x (x) float64 24B 0.1 0.2 0.3
Plain scalars, numpy arrays and a mix of these with xarray objects are also
supported:
>>> magnitude(3, 4)
np.float64(5.0)
>>> magnitude(3, np.array([0, 4]))
array([3., 5.])
>>> magnitude(array, 0)
<xarray.DataArray (x: 3)> Size: 24B
array([1., 2., 3.])
Coordinates:
* x (x) float64 24B 0.1 0.2 0.3
Other examples of how you could use ``apply_ufunc`` to write functions to
(very nearly) replicate existing xarray functionality:
Compute the mean (``.mean``) over one dimension:
>>> def mean(obj, dim):
... # note: apply always moves core dimensions to the end
... return apply_ufunc(
... np.mean, obj, input_core_dims=[[dim]], kwargs={"axis": -1}
... )
...
Inner product over a specific dimension (like :py:func:`dot`):
>>> def _inner(x, y):
... result = np.matmul(x[..., np.newaxis, :], y[..., :, np.newaxis])
... return result[..., 0, 0]
...
>>> def inner_product(a, b, dim):
... return apply_ufunc(_inner, a, b, input_core_dims=[[dim], [dim]])
...
Stack objects along a new dimension (like :py:func:`concat`):
>>> def stack(objects, dim, new_coord):
... # note: this version does not stack coordinates
... func = lambda *x: np.stack(x, axis=-1)
... result = apply_ufunc(
... func,
... *objects,
... output_core_dims=[[dim]],
... join="outer",
... dataset_fill_value=np.nan
... )
... result[dim] = new_coord
... return result
...
If your function is not vectorized but can be applied only to core
dimensions, you can use ``vectorize=True`` to turn it into a vectorized
function. This wraps :py:func:`numpy.vectorize`, so the operation isn't
terribly fast. Here we'll use it to calculate the distance between
empirical samples from two probability distributions, using a scipy
function that needs to be applied to vectors:
>>> import scipy.stats
>>> def earth_mover_distance(first_samples, second_samples, dim="ensemble"):
... return apply_ufunc(
... scipy.stats.wasserstein_distance,
... first_samples,
... second_samples,
... input_core_dims=[[dim], [dim]],
... vectorize=True,
... )
...
Most of NumPy's builtin functions already broadcast their inputs
appropriately for use in ``apply_ufunc``. You may find helper functions such as
:py:func:`numpy.broadcast_arrays` helpful in writing your function. ``apply_ufunc`` also
works well with :py:func:`numba.vectorize` and :py:func:`numba.guvectorize`.
See Also
--------
numpy.broadcast_arrays
numba.vectorize
numba.guvectorize
dask.array.apply_gufunc
xarray.map_blocks
Notes
-----
:ref:`dask.automatic-parallelization`
User guide describing :py:func:`apply_ufunc` and :py:func:`map_blocks`.
:doc:`xarray-tutorial:advanced/apply_ufunc/apply_ufunc`
Advanced Tutorial on applying numpy function using :py:func:`apply_ufunc`
References
----------
.. [1] https://numpy.org/doc/stable/reference/ufuncs.html
.. [2] https://numpy.org/doc/stable/reference/c-api/generalized-ufuncs.html
"""
from xarray.core.dataarray import DataArray
from xarray.core.groupby import GroupBy
from xarray.core.variable import Variable
if input_core_dims is None:
input_core_dims = ((),) * (len(args))
elif len(input_core_dims) != len(args):
raise ValueError(
f"input_core_dims must be None or a tuple with the length same to "
f"the number of arguments. "
f"Given {len(input_core_dims)} input_core_dims: {input_core_dims}, "
f" but number of args is {len(args)}."
)
if kwargs is None:
kwargs = {}
signature = _UFuncSignature(input_core_dims, output_core_dims)
if exclude_dims:
if not isinstance(exclude_dims, set):
raise TypeError(
f"Expected exclude_dims to be a 'set'. Received '{type(exclude_dims).__name__}' instead."
)
if not exclude_dims <= signature.all_core_dims:
raise ValueError(
f"each dimension in `exclude_dims` must also be a "
f"core dimension in the function signature. "
f"Please make {(exclude_dims - signature.all_core_dims)} a core dimension"
)
# handle dask_gufunc_kwargs
if dask == "parallelized":
if dask_gufunc_kwargs is None:
dask_gufunc_kwargs = {}
else:
dask_gufunc_kwargs = dask_gufunc_kwargs.copy()
# todo: remove warnings after deprecation cycle
if meta is not None:
warnings.warn(
"``meta`` should be given in the ``dask_gufunc_kwargs`` parameter."
" It will be removed as direct parameter in a future version.",
FutureWarning,
stacklevel=2,
)
dask_gufunc_kwargs.setdefault("meta", meta)
if output_sizes is not None:
warnings.warn(
"``output_sizes`` should be given in the ``dask_gufunc_kwargs`` "
"parameter. It will be removed as direct parameter in a future "
"version.",
FutureWarning,
stacklevel=2,
)
dask_gufunc_kwargs.setdefault("output_sizes", output_sizes)
if kwargs:
func = functools.partial(func, **kwargs)
if keep_attrs is None:
keep_attrs = _get_keep_attrs(default=False)
if isinstance(keep_attrs, bool):
keep_attrs = "override" if keep_attrs else "drop"
variables_vfunc = functools.partial(
apply_variable_ufunc,
func,
signature=signature,
exclude_dims=exclude_dims,
keep_attrs=keep_attrs,
dask=dask,
vectorize=vectorize,
output_dtypes=output_dtypes,
dask_gufunc_kwargs=dask_gufunc_kwargs,
)
# feed groupby-apply_ufunc through apply_groupby_func
if any(isinstance(a, GroupBy) for a in args):
this_apply = functools.partial(
apply_ufunc,
func,
input_core_dims=input_core_dims,
output_core_dims=output_core_dims,
exclude_dims=exclude_dims,
join=join,
dataset_join=dataset_join,
dataset_fill_value=dataset_fill_value,
keep_attrs=keep_attrs,
dask=dask,
vectorize=vectorize,
output_dtypes=output_dtypes,
dask_gufunc_kwargs=dask_gufunc_kwargs,
)
return apply_groupby_func(this_apply, *args)
# feed datasets apply_variable_ufunc through apply_dataset_vfunc
elif any(is_dict_like(a) for a in args):
return apply_dataset_vfunc(
variables_vfunc,
*args,
signature=signature,
join=join,
exclude_dims=exclude_dims,
dataset_join=dataset_join,
fill_value=dataset_fill_value,
keep_attrs=keep_attrs,
on_missing_core_dim=on_missing_core_dim,
)
# feed DataArray apply_variable_ufunc through apply_dataarray_vfunc
elif any(isinstance(a, DataArray) for a in args):
return apply_dataarray_vfunc(
variables_vfunc,
*args,
signature=signature,
join=join,
exclude_dims=exclude_dims,
keep_attrs=keep_attrs,
)
# feed Variables directly through apply_variable_ufunc
elif any(isinstance(a, Variable) for a in args):
return variables_vfunc(*args)
else:
# feed anything else through apply_array_ufunc
return apply_array_ufunc(func, *args, dask=dask)
| apply_ufunc |
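The docstring above describes the ``dask="parallelized"`` option but gives no worked example. Below is a minimal sketch added here (not part of the dataset row); it assumes dask is installed, and the helper name ``block_mean`` is illustrative.

import numpy as np
import xarray as xr

def block_mean(arr):
    # apply_ufunc moves core dimensions to the end, so "time" is the last axis here
    return arr.mean(axis=-1)

da = xr.DataArray(np.random.rand(4, 6), dims=("x", "time")).chunk({"x": 2})
lazy = xr.apply_ufunc(
    block_mean,
    da,
    input_core_dims=[["time"]],
    dask="parallelized",       # func sees plain numpy blocks; dask schedules them
    output_dtypes=[da.dtype],
)
result = lazy.compute()        # evaluate the chunked computation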
xarray | 19 | xarray/core/common.py | def assign_coords(
self,
coords: Mapping | None = None,
**coords_kwargs: Any,
) -> Self:
"""Assign new coordinates to this object.
Returns a new object with all the original data in addition to the new
coordinates.
Parameters
----------
coords : mapping of dim to coord, optional
A mapping whose keys are the names of the coordinates and values are the
coordinates to assign. The mapping will generally be a dict or
:class:`Coordinates`.
* If a value is a standard data value — for example, a ``DataArray``,
scalar, or array — the data is simply assigned as a coordinate.
* If a value is callable, it is called with this object as the only
parameter, and the return value is used as new coordinate variables.
* A coordinate can also be defined and attached to an existing dimension
using a tuple with the first element the dimension name and the second
element the values for this new coordinate.
**coords_kwargs : optional
The keyword arguments form of ``coords``.
One of ``coords`` or ``coords_kwargs`` must be provided.
Returns
-------
assigned : same type as caller
A new object with the new coordinates in addition to the existing
data.
Examples
--------
Convert `DataArray` longitude coordinates from 0-359 to -180-179:
>>> da = xr.DataArray(
... np.random.rand(4),
... coords=[np.array([358, 359, 0, 1])],
... dims="lon",
... )
>>> da
<xarray.DataArray (lon: 4)> Size: 32B
array([0.5488135 , 0.71518937, 0.60276338, 0.54488318])
Coordinates:
* lon (lon) int64 32B 358 359 0 1
>>> da.assign_coords(lon=(((da.lon + 180) % 360) - 180))
<xarray.DataArray (lon: 4)> Size: 32B
array([0.5488135 , 0.71518937, 0.60276338, 0.54488318])
Coordinates:
* lon (lon) int64 32B -2 -1 0 1
The function also accepts dictionary arguments:
>>> da.assign_coords({"lon": (((da.lon + 180) % 360) - 180)})
<xarray.DataArray (lon: 4)> Size: 32B
array([0.5488135 , 0.71518937, 0.60276338, 0.54488318])
Coordinates:
* lon (lon) int64 32B -2 -1 0 1
New coordinate can also be attached to an existing dimension:
>>> lon_2 = np.array([300, 289, 0, 1])
>>> da.assign_coords(lon_2=("lon", lon_2))
<xarray.DataArray (lon: 4)> Size: 32B
array([0.5488135 , 0.71518937, 0.60276338, 0.54488318])
Coordinates:
* lon (lon) int64 32B 358 359 0 1
lon_2 (lon) int64 32B 300 289 0 1
Note that the same result can also be obtained with a dict e.g.
>>> _ = da.assign_coords({"lon_2": ("lon", lon_2)})
Note the same method applies to `Dataset` objects.
Convert `Dataset` longitude coordinates from 0-359 to -180-179:
>>> temperature = np.linspace(20, 32, num=16).reshape(2, 2, 4)
>>> precipitation = 2 * np.identity(4).reshape(2, 2, 4)
>>> ds = xr.Dataset(
... data_vars=dict(
... temperature=(["x", "y", "time"], temperature),
... precipitation=(["x", "y", "time"], precipitation),
... ),
... coords=dict(
... lon=(["x", "y"], [[260.17, 260.68], [260.21, 260.77]]),
... lat=(["x", "y"], [[42.25, 42.21], [42.63, 42.59]]),
... time=pd.date_range("2014-09-06", periods=4),
... reference_time=pd.Timestamp("2014-09-05"),
... ),
... attrs=dict(description="Weather-related data"),
... )
>>> ds
<xarray.Dataset> Size: 360B
Dimensions: (x: 2, y: 2, time: 4)
Coordinates:
lon (x, y) float64 32B 260.2 260.7 260.2 260.8
lat (x, y) float64 32B 42.25 42.21 42.63 42.59
* time (time) datetime64[ns] 32B 2014-09-06 ... 2014-09-09
reference_time datetime64[ns] 8B 2014-09-05
Dimensions without coordinates: x, y
Data variables:
temperature (x, y, time) float64 128B 20.0 20.8 21.6 ... 30.4 31.2 32.0
precipitation (x, y, time) float64 128B 2.0 0.0 0.0 0.0 ... 0.0 0.0 2.0
Attributes:
description: Weather-related data
>>> ds.assign_coords(lon=(((ds.lon + 180) % 360) - 180))
<xarray.Dataset> Size: 360B
Dimensions: (x: 2, y: 2, time: 4)
Coordinates:
lon (x, y) float64 32B -99.83 -99.32 -99.79 -99.23
lat (x, y) float64 32B 42.25 42.21 42.63 42.59
* time (time) datetime64[ns] 32B 2014-09-06 ... 2014-09-09
reference_time datetime64[ns] 8B 2014-09-05
Dimensions without coordinates: x, y
Data variables:
temperature (x, y, time) float64 128B 20.0 20.8 21.6 ... 30.4 31.2 32.0
precipitation (x, y, time) float64 128B 2.0 0.0 0.0 0.0 ... 0.0 0.0 2.0
Attributes:
description: Weather-related data
See Also
--------
Dataset.assign
Dataset.swap_dims
Dataset.set_coords
"""
| /usr/src/app/target_test_cases/failed_tests_assign_coords.txt | def assign_coords(
self,
coords: Mapping | None = None,
**coords_kwargs: Any,
) -> Self:
"""Assign new coordinates to this object.
Returns a new object with all the original data in addition to the new
coordinates.
Parameters
----------
coords : mapping of dim to coord, optional
A mapping whose keys are the names of the coordinates and values are the
coordinates to assign. The mapping will generally be a dict or
:class:`Coordinates`.
* If a value is a standard data value — for example, a ``DataArray``,
scalar, or array — the data is simply assigned as a coordinate.
* If a value is callable, it is called with this object as the only
parameter, and the return value is used as new coordinate variables.
* A coordinate can also be defined and attached to an existing dimension
using a tuple with the first element the dimension name and the second
element the values for this new coordinate.
**coords_kwargs : optional
The keyword arguments form of ``coords``.
One of ``coords`` or ``coords_kwargs`` must be provided.
Returns
-------
assigned : same type as caller
A new object with the new coordinates in addition to the existing
data.
Examples
--------
Convert `DataArray` longitude coordinates from 0-359 to -180-179:
>>> da = xr.DataArray(
... np.random.rand(4),
... coords=[np.array([358, 359, 0, 1])],
... dims="lon",
... )
>>> da
<xarray.DataArray (lon: 4)> Size: 32B
array([0.5488135 , 0.71518937, 0.60276338, 0.54488318])
Coordinates:
* lon (lon) int64 32B 358 359 0 1
>>> da.assign_coords(lon=(((da.lon + 180) % 360) - 180))
<xarray.DataArray (lon: 4)> Size: 32B
array([0.5488135 , 0.71518937, 0.60276338, 0.54488318])
Coordinates:
* lon (lon) int64 32B -2 -1 0 1
The function also accepts dictionary arguments:
>>> da.assign_coords({"lon": (((da.lon + 180) % 360) - 180)})
<xarray.DataArray (lon: 4)> Size: 32B
array([0.5488135 , 0.71518937, 0.60276338, 0.54488318])
Coordinates:
* lon (lon) int64 32B -2 -1 0 1
New coordinate can also be attached to an existing dimension:
>>> lon_2 = np.array([300, 289, 0, 1])
>>> da.assign_coords(lon_2=("lon", lon_2))
<xarray.DataArray (lon: 4)> Size: 32B
array([0.5488135 , 0.71518937, 0.60276338, 0.54488318])
Coordinates:
* lon (lon) int64 32B 358 359 0 1
lon_2 (lon) int64 32B 300 289 0 1
Note that the same result can also be obtained with a dict e.g.
>>> _ = da.assign_coords({"lon_2": ("lon", lon_2)})
Note the same method applies to `Dataset` objects.
Convert `Dataset` longitude coordinates from 0-359 to -180-179:
>>> temperature = np.linspace(20, 32, num=16).reshape(2, 2, 4)
>>> precipitation = 2 * np.identity(4).reshape(2, 2, 4)
>>> ds = xr.Dataset(
... data_vars=dict(
... temperature=(["x", "y", "time"], temperature),
... precipitation=(["x", "y", "time"], precipitation),
... ),
... coords=dict(
... lon=(["x", "y"], [[260.17, 260.68], [260.21, 260.77]]),
... lat=(["x", "y"], [[42.25, 42.21], [42.63, 42.59]]),
... time=pd.date_range("2014-09-06", periods=4),
... reference_time=pd.Timestamp("2014-09-05"),
... ),
... attrs=dict(description="Weather-related data"),
... )
>>> ds
<xarray.Dataset> Size: 360B
Dimensions: (x: 2, y: 2, time: 4)
Coordinates:
lon (x, y) float64 32B 260.2 260.7 260.2 260.8
lat (x, y) float64 32B 42.25 42.21 42.63 42.59
* time (time) datetime64[ns] 32B 2014-09-06 ... 2014-09-09
reference_time datetime64[ns] 8B 2014-09-05
Dimensions without coordinates: x, y
Data variables:
temperature (x, y, time) float64 128B 20.0 20.8 21.6 ... 30.4 31.2 32.0
precipitation (x, y, time) float64 128B 2.0 0.0 0.0 0.0 ... 0.0 0.0 2.0
Attributes:
description: Weather-related data
>>> ds.assign_coords(lon=(((ds.lon + 180) % 360) - 180))
<xarray.Dataset> Size: 360B
Dimensions: (x: 2, y: 2, time: 4)
Coordinates:
lon (x, y) float64 32B -99.83 -99.32 -99.79 -99.23
lat (x, y) float64 32B 42.25 42.21 42.63 42.59
* time (time) datetime64[ns] 32B 2014-09-06 ... 2014-09-09
reference_time datetime64[ns] 8B 2014-09-05
Dimensions without coordinates: x, y
Data variables:
temperature (x, y, time) float64 128B 20.0 20.8 21.6 ... 30.4 31.2 32.0
precipitation (x, y, time) float64 128B 2.0 0.0 0.0 0.0 ... 0.0 0.0 2.0
Attributes:
description: Weather-related data
See Also
--------
Dataset.assign
Dataset.swap_dims
Dataset.set_coords
"""
from xarray.core.coordinates import Coordinates
coords_combined = either_dict_or_kwargs(coords, coords_kwargs, "assign_coords")
data = self.copy(deep=False)
results: Coordinates | dict[Hashable, Any]
if isinstance(coords, Coordinates):
results = coords
else:
results = self._calc_assign_results(coords_combined)
data.coords.update(results)
return data
| assign_coords |
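The ``assign_coords`` docstring above notes that callable values are invoked with the object itself, but does not demonstrate it. A small illustrative sketch, added here and not part of the dataset row:

import numpy as np
import xarray as xr

da = xr.DataArray(np.arange(4), dims="lon", coords={"lon": [358, 359, 0, 1]})
# the lambda receives `da`; its return value becomes the new "lon" coordinate
shifted = da.assign_coords(lon=lambda d: ((d.lon + 180) % 360) - 180)
# shifted.lon is now [-2, -1, 0, 1], matching the dict and kwarg forms shown above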
xarray | 20 | xarray/core/variable.py | def astype(
self,
dtype,
*,
order=None,
casting=None,
subok=None,
copy=None,
keep_attrs=True,
) -> Self:
"""
Copy of the Variable object, with data cast to a specified type.
Parameters
----------
dtype : str or dtype
Typecode or data-type to which the array is cast.
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout order of the result. ‘C’ means C order,
‘F’ means Fortran order, ‘A’ means ‘F’ order if all the arrays are
Fortran contiguous, ‘C’ order otherwise, and ‘K’ means as close to
the order the array elements appear in memory as possible.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
subok : bool, optional
If True, then sub-classes will be passed-through, otherwise the
returned array will be forced to be a base-class array.
copy : bool, optional
By default, astype always returns a newly allocated array. If this
is set to False and the `dtype` requirement is satisfied, the input
array is returned instead of a copy.
keep_attrs : bool, optional
By default, astype keeps attributes. Set to False to remove
attributes in the returned object.
Returns
-------
out : same as object
New object with data cast to the specified type.
Notes
-----
The ``order``, ``casting``, ``subok`` and ``copy`` arguments are only passed
through to the ``astype`` method of the underlying array when a value
different than ``None`` is supplied.
Make sure to only supply these arguments if the underlying array class
supports them.
See Also
--------
numpy.ndarray.astype
dask.array.Array.astype
sparse.COO.astype
"""
| /usr/src/app/target_test_cases/failed_tests_astype.txt | def astype(
self,
dtype,
*,
order=None,
casting=None,
subok=None,
copy=None,
keep_attrs=True,
) -> Self:
"""
Copy of the Variable object, with data cast to a specified type.
Parameters
----------
dtype : str or dtype
Typecode or data-type to which the array is cast.
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout order of the result. ‘C’ means C order,
‘F’ means Fortran order, ‘A’ means ‘F’ order if all the arrays are
Fortran contiguous, ‘C’ order otherwise, and ‘K’ means as close to
the order the array elements appear in memory as possible.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
subok : bool, optional
If True, then sub-classes will be passed-through, otherwise the
returned array will be forced to be a base-class array.
copy : bool, optional
By default, astype always returns a newly allocated array. If this
is set to False and the `dtype` requirement is satisfied, the input
array is returned instead of a copy.
keep_attrs : bool, optional
By default, astype keeps attributes. Set to False to remove
attributes in the returned object.
Returns
-------
out : same as object
New object with data cast to the specified type.
Notes
-----
The ``order``, ``casting``, ``subok`` and ``copy`` arguments are only passed
through to the ``astype`` method of the underlying array when a value
different than ``None`` is supplied.
Make sure to only supply these arguments if the underlying array class
supports them.
See Also
--------
numpy.ndarray.astype
dask.array.Array.astype
sparse.COO.astype
"""
from xarray.core.computation import apply_ufunc
kwargs = dict(order=order, casting=casting, subok=subok, copy=copy)
kwargs = {k: v for k, v in kwargs.items() if v is not None}
return apply_ufunc(
duck_array_ops.astype,
self,
dtype,
kwargs=kwargs,
keep_attrs=keep_attrs,
dask="allowed",
)
| astype |
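A brief usage sketch for the ``astype`` method above (an addition, not part of the row), showing the default attribute handling described in its docstring:

import numpy as np
import xarray as xr

v = xr.Variable("x", np.array([1, 2, 3]), attrs={"units": "m"})
as_float = v.astype(np.float32)                # attrs kept: {"units": "m"}
bare = v.astype(np.float32, keep_attrs=False)  # attrs dropped
# order/casting/subok/copy are only forwarded to the underlying array when not None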
xarray | 21 | xarray/core/computation.py | def build_output_coords_and_indexes(
args: Iterable[Any],
signature: _UFuncSignature,
exclude_dims: Set = frozenset(),
combine_attrs: CombineAttrsOptions = "override",
) -> tuple[list[dict[Any, Variable]], list[dict[Any, Index]]]:
"""Build output coordinates and indexes for an operation.
Parameters
----------
args : Iterable
List of raw operation arguments. Any valid types for xarray operations
are OK, e.g., scalars, Variable, DataArray, Dataset.
signature : _UfuncSignature
Core dimensions signature for the operation.
exclude_dims : set, optional
Dimensions excluded from the operation. Coordinates along these
dimensions are dropped.
combine_attrs : {"drop", "identical", "no_conflicts", "drop_conflicts", \
"override"} or callable, default: "drop"
A callable or a string indicating how to combine attrs of the objects being
merged:
- "drop": empty attrs on returned Dataset.
- "identical": all attrs must be the same on every object.
- "no_conflicts": attrs from all objects are combined, any that have
the same name must also have the same value.
- "drop_conflicts": attrs from all objects are combined, any that have
the same name but different values are dropped.
- "override": skip comparing and copy attrs from the first dataset to
the result.
If a callable, it must expect a sequence of ``attrs`` dicts and a context object
as its only parameters.
Returns
-------
Dictionaries of Variable and Index objects with merged coordinates.
"""
| /usr/src/app/target_test_cases/failed_tests_build_output_coords_and_indexes.txt | def build_output_coords_and_indexes(
args: Iterable[Any],
signature: _UFuncSignature,
exclude_dims: Set = frozenset(),
combine_attrs: CombineAttrsOptions = "override",
) -> tuple[list[dict[Any, Variable]], list[dict[Any, Index]]]:
"""Build output coordinates and indexes for an operation.
Parameters
----------
args : Iterable
List of raw operation arguments. Any valid types for xarray operations
are OK, e.g., scalars, Variable, DataArray, Dataset.
signature : _UfuncSignature
Core dimensions signature for the operation.
exclude_dims : set, optional
Dimensions excluded from the operation. Coordinates along these
dimensions are dropped.
combine_attrs : {"drop", "identical", "no_conflicts", "drop_conflicts", \
"override"} or callable, default: "drop"
A callable or a string indicating how to combine attrs of the objects being
merged:
- "drop": empty attrs on returned Dataset.
- "identical": all attrs must be the same on every object.
- "no_conflicts": attrs from all objects are combined, any that have
the same name must also have the same value.
- "drop_conflicts": attrs from all objects are combined, any that have
the same name but different values are dropped.
- "override": skip comparing and copy attrs from the first dataset to
the result.
If a callable, it must expect a sequence of ``attrs`` dicts and a context object
as its only parameters.
Returns
-------
Dictionaries of Variable and Index objects with merged coordinates.
"""
coords_list = _get_coords_list(args)
if len(coords_list) == 1 and not exclude_dims:
# we can skip the expensive merge
(unpacked_coords,) = coords_list
merged_vars = dict(unpacked_coords.variables)
merged_indexes = dict(unpacked_coords.xindexes)
else:
merged_vars, merged_indexes = merge_coordinates_without_align(
coords_list, exclude_dims=exclude_dims, combine_attrs=combine_attrs
)
output_coords = []
output_indexes = []
for output_dims in signature.output_core_dims:
dropped_dims = signature.all_input_core_dims - set(output_dims)
if dropped_dims:
filtered_coords = {
k: v for k, v in merged_vars.items() if dropped_dims.isdisjoint(v.dims)
}
filtered_indexes = filter_indexes_from_coords(
merged_indexes, set(filtered_coords)
)
else:
filtered_coords = merged_vars
filtered_indexes = merged_indexes
output_coords.append(filtered_coords)
output_indexes.append(filtered_indexes)
return output_coords, output_indexes
| build_output_coords_and_indexes |
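``build_output_coords_and_indexes`` is internal, so the sketch below (an addition; the variable names are illustrative) shows its coordinate-dropping behaviour indirectly through ``apply_ufunc``: coordinates along a consumed core dimension do not appear on the output.

import numpy as np
import xarray as xr

da = xr.DataArray(
    np.arange(6.0).reshape(2, 3),
    dims=("x", "time"),
    coords={"x": [10, 20], "time": [1, 2, 3]},
)
reduced = xr.apply_ufunc(np.mean, da, input_core_dims=[["time"]], kwargs={"axis": -1})
# "time" was a core dim consumed by func, so the "time" coordinate is dropped;
# the "x" coordinate is merged through to the output unchanged.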
xarray | 22 | xarray/core/accessor_str.py | def cat(self, *others, sep: str | bytes | Any = "") -> T_DataArray:
"""
Concatenate strings elementwise in the DataArray with other strings.
The other strings can either be string scalars or other array-like.
Dimensions are automatically broadcast together.
An optional separator `sep` can also be specified. If `sep` is
array-like, it is broadcast against the array and applied elementwise.
Parameters
----------
*others : str or array-like of str
Strings or array-like of strings to concatenate elementwise with
the current DataArray.
sep : str or array-like of str, default: "".
Separator to use between strings.
It is broadcast in the same way as the other input strings.
If array-like, its dimensions will be placed at the end of the output array dimensions.
Returns
-------
concatenated : same type as values
Examples
--------
Create a string array
>>> myarray = xr.DataArray(
... ["11111", "4"],
... dims=["X"],
... )
Create some arrays to concatenate with it
>>> values_1 = xr.DataArray(
... ["a", "bb", "cccc"],
... dims=["Y"],
... )
>>> values_2 = np.array(3.4)
>>> values_3 = ""
>>> values_4 = np.array("test", dtype=np.str_)
Determine the separator to use
>>> seps = xr.DataArray(
... [" ", ", "],
... dims=["ZZ"],
... )
Concatenate the arrays using the separator
>>> myarray.str.cat(values_1, values_2, values_3, values_4, sep=seps)
<xarray.DataArray (X: 2, Y: 3, ZZ: 2)> Size: 1kB
array([[['11111 a 3.4 test', '11111, a, 3.4, , test'],
['11111 bb 3.4 test', '11111, bb, 3.4, , test'],
['11111 cccc 3.4 test', '11111, cccc, 3.4, , test']],
<BLANKLINE>
[['4 a 3.4 test', '4, a, 3.4, , test'],
['4 bb 3.4 test', '4, bb, 3.4, , test'],
['4 cccc 3.4 test', '4, cccc, 3.4, , test']]], dtype='<U24')
Dimensions without coordinates: X, Y, ZZ
See Also
--------
pandas.Series.str.cat
str.join
"""
| /usr/src/app/target_test_cases/failed_tests_cat.txt | def cat(self, *others, sep: str | bytes | Any = "") -> T_DataArray:
"""
Concatenate strings elementwise in the DataArray with other strings.
The other strings can either be string scalars or other array-like.
Dimensions are automatically broadcast together.
An optional separator `sep` can also be specified. If `sep` is
array-like, it is broadcast against the array and applied elementwise.
Parameters
----------
*others : str or array-like of str
Strings or array-like of strings to concatenate elementwise with
the current DataArray.
sep : str or array-like of str, default: "".
Separator to use between strings.
It is broadcast in the same way as the other input strings.
If array-like, its dimensions will be placed at the end of the output array dimensions.
Returns
-------
concatenated : same type as values
Examples
--------
Create a string array
>>> myarray = xr.DataArray(
... ["11111", "4"],
... dims=["X"],
... )
Create some arrays to concatenate with it
>>> values_1 = xr.DataArray(
... ["a", "bb", "cccc"],
... dims=["Y"],
... )
>>> values_2 = np.array(3.4)
>>> values_3 = ""
>>> values_4 = np.array("test", dtype=np.str_)
Determine the separator to use
>>> seps = xr.DataArray(
... [" ", ", "],
... dims=["ZZ"],
... )
Concatenate the arrays using the separator
>>> myarray.str.cat(values_1, values_2, values_3, values_4, sep=seps)
<xarray.DataArray (X: 2, Y: 3, ZZ: 2)> Size: 1kB
array([[['11111 a 3.4 test', '11111, a, 3.4, , test'],
['11111 bb 3.4 test', '11111, bb, 3.4, , test'],
['11111 cccc 3.4 test', '11111, cccc, 3.4, , test']],
<BLANKLINE>
[['4 a 3.4 test', '4, a, 3.4, , test'],
['4 bb 3.4 test', '4, bb, 3.4, , test'],
['4 cccc 3.4 test', '4, cccc, 3.4, , test']]], dtype='<U24')
Dimensions without coordinates: X, Y, ZZ
See Also
--------
pandas.Series.str.cat
str.join
"""
sep = self._stringify(sep)
others = tuple(self._stringify(x) for x in others)
others = others + (sep,)
# sep will go at the end of the input arguments.
func = lambda *x: x[-1].join(x[:-1])
return self._apply(
func=func,
func_args=others,
dtype=self._obj.dtype.kind,
)
| cat |
xarray | 23 | xarray/coding/cftime_offsets.py | def cftime_range(
start=None,
end=None,
periods=None,
freq=None,
normalize=False,
name=None,
closed: NoDefault | SideOptions = no_default,
inclusive: None | InclusiveOptions = None,
calendar="standard",
) -> CFTimeIndex:
"""Return a fixed frequency CFTimeIndex.
Parameters
----------
start : str or cftime.datetime, optional
Left bound for generating dates.
end : str or cftime.datetime, optional
Right bound for generating dates.
periods : int, optional
Number of periods to generate.
freq : str or None, default: "D"
Frequency strings can have multiples, e.g. "5h" and negative values, e.g. "-1D".
normalize : bool, default: False
Normalize start/end dates to midnight before generating date range.
name : str, default: None
Name of the resulting index
closed : {None, "left", "right"}, default: "NO_DEFAULT"
Make the interval closed with respect to the given frequency to the
"left", "right", or both sides (None).
.. deprecated:: 2023.02.0
Following pandas, the ``closed`` parameter is deprecated in favor
of the ``inclusive`` parameter, and will be removed in a future
version of xarray.
inclusive : {None, "both", "neither", "left", "right"}, default: None
Include boundaries; whether to set each bound as closed or open.
.. versionadded:: 2023.02.0
calendar : str, default: "standard"
Calendar type for the datetimes.
Returns
-------
CFTimeIndex
Notes
-----
This function is an analog of ``pandas.date_range`` for use in generating
sequences of ``cftime.datetime`` objects. It supports most of the
features of ``pandas.date_range`` (e.g. specifying how the index is
``closed`` on either side, or whether or not to ``normalize`` the start and
end bounds); however, there are some notable exceptions:
- You cannot specify a ``tz`` (time zone) argument.
- Start or end dates specified as partial-datetime strings must use the
`ISO-8601 format <https://en.wikipedia.org/wiki/ISO_8601>`_.
- It supports many, but not all, frequencies supported by
``pandas.date_range``. For example it does not currently support any of
the business-related or semi-monthly frequencies.
- Compound sub-monthly frequencies are not supported, e.g. '1H1min', as
these can easily be written in terms of the finest common resolution,
e.g. '61min'.
Valid simple frequency strings for use with ``cftime``-calendars include
any multiples of the following.
+--------+--------------------------+
| Alias | Description |
+========+==========================+
| YE | Year-end frequency |
+--------+--------------------------+
| YS | Year-start frequency |
+--------+--------------------------+
| QE | Quarter-end frequency |
+--------+--------------------------+
| QS | Quarter-start frequency |
+--------+--------------------------+
| ME | Month-end frequency |
+--------+--------------------------+
| MS | Month-start frequency |
+--------+--------------------------+
| D | Day frequency |
+--------+--------------------------+
| h | Hour frequency |
+--------+--------------------------+
| min | Minute frequency |
+--------+--------------------------+
| s | Second frequency |
+--------+--------------------------+
| ms | Millisecond frequency |
+--------+--------------------------+
| us | Microsecond frequency |
+--------+--------------------------+
Any multiples of the following anchored offsets are also supported.
+------------+--------------------------------------------------------------------+
| Alias | Description |
+============+====================================================================+
| Y(E,S)-JAN | Annual frequency, anchored at the (end, beginning) of January |
+------------+--------------------------------------------------------------------+
| Y(E,S)-FEB | Annual frequency, anchored at the (end, beginning) of February |
+------------+--------------------------------------------------------------------+
| Y(E,S)-MAR | Annual frequency, anchored at the (end, beginning) of March |
+------------+--------------------------------------------------------------------+
| Y(E,S)-APR | Annual frequency, anchored at the (end, beginning) of April |
+------------+--------------------------------------------------------------------+
| Y(E,S)-MAY | Annual frequency, anchored at the (end, beginning) of May |
+------------+--------------------------------------------------------------------+
| Y(E,S)-JUN | Annual frequency, anchored at the (end, beginning) of June |
+------------+--------------------------------------------------------------------+
| Y(E,S)-JUL | Annual frequency, anchored at the (end, beginning) of July |
+------------+--------------------------------------------------------------------+
| Y(E,S)-AUG | Annual frequency, anchored at the (end, beginning) of August |
+------------+--------------------------------------------------------------------+
| Y(E,S)-SEP | Annual frequency, anchored at the (end, beginning) of September |
+------------+--------------------------------------------------------------------+
| Y(E,S)-OCT | Annual frequency, anchored at the (end, beginning) of October |
+------------+--------------------------------------------------------------------+
| Y(E,S)-NOV | Annual frequency, anchored at the (end, beginning) of November |
+------------+--------------------------------------------------------------------+
| Y(E,S)-DEC | Annual frequency, anchored at the (end, beginning) of December |
+------------+--------------------------------------------------------------------+
| Q(E,S)-JAN | Quarter frequency, anchored at the (end, beginning) of January |
+------------+--------------------------------------------------------------------+
| Q(E,S)-FEB | Quarter frequency, anchored at the (end, beginning) of February |
+------------+--------------------------------------------------------------------+
| Q(E,S)-MAR | Quarter frequency, anchored at the (end, beginning) of March |
+------------+--------------------------------------------------------------------+
| Q(E,S)-APR | Quarter frequency, anchored at the (end, beginning) of April |
+------------+--------------------------------------------------------------------+
| Q(E,S)-MAY | Quarter frequency, anchored at the (end, beginning) of May |
+------------+--------------------------------------------------------------------+
| Q(E,S)-JUN | Quarter frequency, anchored at the (end, beginning) of June |
+------------+--------------------------------------------------------------------+
| Q(E,S)-JUL | Quarter frequency, anchored at the (end, beginning) of July |
+------------+--------------------------------------------------------------------+
| Q(E,S)-AUG | Quarter frequency, anchored at the (end, beginning) of August |
+------------+--------------------------------------------------------------------+
| Q(E,S)-SEP | Quarter frequency, anchored at the (end, beginning) of September |
+------------+--------------------------------------------------------------------+
| Q(E,S)-OCT | Quarter frequency, anchored at the (end, beginning) of October |
+------------+--------------------------------------------------------------------+
| Q(E,S)-NOV | Quarter frequency, anchored at the (end, beginning) of November |
+------------+--------------------------------------------------------------------+
| Q(E,S)-DEC | Quarter frequency, anchored at the (end, beginning) of December |
+------------+--------------------------------------------------------------------+
Finally, the following calendar aliases are supported.
+--------------------------------+---------------------------------------+
| Alias | Date type |
+================================+=======================================+
| standard, gregorian | ``cftime.DatetimeGregorian`` |
+--------------------------------+---------------------------------------+
| proleptic_gregorian | ``cftime.DatetimeProlepticGregorian`` |
+--------------------------------+---------------------------------------+
| noleap, 365_day | ``cftime.DatetimeNoLeap`` |
+--------------------------------+---------------------------------------+
| all_leap, 366_day | ``cftime.DatetimeAllLeap`` |
+--------------------------------+---------------------------------------+
| 360_day | ``cftime.Datetime360Day`` |
+--------------------------------+---------------------------------------+
| julian | ``cftime.DatetimeJulian`` |
+--------------------------------+---------------------------------------+
Examples
--------
This function returns a ``CFTimeIndex``, populated with ``cftime.datetime``
objects associated with the specified calendar type, e.g.
>>> xr.cftime_range(start="2000", periods=6, freq="2MS", calendar="noleap")
CFTimeIndex([2000-01-01 00:00:00, 2000-03-01 00:00:00, 2000-05-01 00:00:00,
2000-07-01 00:00:00, 2000-09-01 00:00:00, 2000-11-01 00:00:00],
dtype='object', length=6, calendar='noleap', freq='2MS')
As in the standard pandas function, three of the ``start``, ``end``,
``periods``, or ``freq`` arguments must be specified at a given time, with
the other set to ``None``. See the `pandas documentation
<https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.date_range.html>`_
for more examples of the behavior of ``date_range`` with each of the
parameters.
See Also
--------
pandas.date_range
"""
| /usr/src/app/target_test_cases/failed_tests_cftime_range.txt | def cftime_range(
start=None,
end=None,
periods=None,
freq=None,
normalize=False,
name=None,
closed: NoDefault | SideOptions = no_default,
inclusive: None | InclusiveOptions = None,
calendar="standard",
) -> CFTimeIndex:
"""Return a fixed frequency CFTimeIndex.
Parameters
----------
start : str or cftime.datetime, optional
Left bound for generating dates.
end : str or cftime.datetime, optional
Right bound for generating dates.
periods : int, optional
Number of periods to generate.
freq : str or None, default: "D"
Frequency strings can have multiples, e.g. "5h" and negative values, e.g. "-1D".
normalize : bool, default: False
Normalize start/end dates to midnight before generating date range.
name : str, default: None
Name of the resulting index
closed : {None, "left", "right"}, default: "NO_DEFAULT"
Make the interval closed with respect to the given frequency to the
"left", "right", or both sides (None).
.. deprecated:: 2023.02.0
Following pandas, the ``closed`` parameter is deprecated in favor
of the ``inclusive`` parameter, and will be removed in a future
version of xarray.
inclusive : {None, "both", "neither", "left", "right"}, default: None
Include boundaries; whether to set each bound as closed or open.
.. versionadded:: 2023.02.0
calendar : str, default: "standard"
Calendar type for the datetimes.
Returns
-------
CFTimeIndex
Notes
-----
This function is an analog of ``pandas.date_range`` for use in generating
sequences of ``cftime.datetime`` objects. It supports most of the
features of ``pandas.date_range`` (e.g. specifying how the index is
``closed`` on either side, or whether or not to ``normalize`` the start and
end bounds); however, there are some notable exceptions:
- You cannot specify a ``tz`` (time zone) argument.
- Start or end dates specified as partial-datetime strings must use the
`ISO-8601 format <https://en.wikipedia.org/wiki/ISO_8601>`_.
- It supports many, but not all, frequencies supported by
``pandas.date_range``. For example it does not currently support any of
the business-related or semi-monthly frequencies.
- Compound sub-monthly frequencies are not supported, e.g. '1H1min', as
these can easily be written in terms of the finest common resolution,
e.g. '61min'.
Valid simple frequency strings for use with ``cftime``-calendars include
any multiples of the following.
+--------+--------------------------+
| Alias | Description |
+========+==========================+
| YE | Year-end frequency |
+--------+--------------------------+
| YS | Year-start frequency |
+--------+--------------------------+
| QE | Quarter-end frequency |
+--------+--------------------------+
| QS | Quarter-start frequency |
+--------+--------------------------+
| ME | Month-end frequency |
+--------+--------------------------+
| MS | Month-start frequency |
+--------+--------------------------+
| D | Day frequency |
+--------+--------------------------+
| h | Hour frequency |
+--------+--------------------------+
| min | Minute frequency |
+--------+--------------------------+
| s | Second frequency |
+--------+--------------------------+
| ms | Millisecond frequency |
+--------+--------------------------+
| us | Microsecond frequency |
+--------+--------------------------+
Any multiples of the following anchored offsets are also supported.
+------------+--------------------------------------------------------------------+
| Alias | Description |
+============+====================================================================+
| Y(E,S)-JAN | Annual frequency, anchored at the (end, beginning) of January |
+------------+--------------------------------------------------------------------+
| Y(E,S)-FEB | Annual frequency, anchored at the (end, beginning) of February |
+------------+--------------------------------------------------------------------+
| Y(E,S)-MAR | Annual frequency, anchored at the (end, beginning) of March |
+------------+--------------------------------------------------------------------+
| Y(E,S)-APR | Annual frequency, anchored at the (end, beginning) of April |
+------------+--------------------------------------------------------------------+
| Y(E,S)-MAY | Annual frequency, anchored at the (end, beginning) of May |
+------------+--------------------------------------------------------------------+
| Y(E,S)-JUN | Annual frequency, anchored at the (end, beginning) of June |
+------------+--------------------------------------------------------------------+
| Y(E,S)-JUL | Annual frequency, anchored at the (end, beginning) of July |
+------------+--------------------------------------------------------------------+
| Y(E,S)-AUG | Annual frequency, anchored at the (end, beginning) of August |
+------------+--------------------------------------------------------------------+
| Y(E,S)-SEP | Annual frequency, anchored at the (end, beginning) of September |
+------------+--------------------------------------------------------------------+
| Y(E,S)-OCT | Annual frequency, anchored at the (end, beginning) of October |
+------------+--------------------------------------------------------------------+
| Y(E,S)-NOV | Annual frequency, anchored at the (end, beginning) of November |
+------------+--------------------------------------------------------------------+
| Y(E,S)-DEC | Annual frequency, anchored at the (end, beginning) of December |
+------------+--------------------------------------------------------------------+
| Q(E,S)-JAN | Quarter frequency, anchored at the (end, beginning) of January |
+------------+--------------------------------------------------------------------+
| Q(E,S)-FEB | Quarter frequency, anchored at the (end, beginning) of February |
+------------+--------------------------------------------------------------------+
| Q(E,S)-MAR | Quarter frequency, anchored at the (end, beginning) of March |
+------------+--------------------------------------------------------------------+
| Q(E,S)-APR | Quarter frequency, anchored at the (end, beginning) of April |
+------------+--------------------------------------------------------------------+
| Q(E,S)-MAY | Quarter frequency, anchored at the (end, beginning) of May |
+------------+--------------------------------------------------------------------+
| Q(E,S)-JUN | Quarter frequency, anchored at the (end, beginning) of June |
+------------+--------------------------------------------------------------------+
| Q(E,S)-JUL | Quarter frequency, anchored at the (end, beginning) of July |
+------------+--------------------------------------------------------------------+
| Q(E,S)-AUG | Quarter frequency, anchored at the (end, beginning) of August |
+------------+--------------------------------------------------------------------+
| Q(E,S)-SEP | Quarter frequency, anchored at the (end, beginning) of September |
+------------+--------------------------------------------------------------------+
| Q(E,S)-OCT | Quarter frequency, anchored at the (end, beginning) of October |
+------------+--------------------------------------------------------------------+
| Q(E,S)-NOV | Quarter frequency, anchored at the (end, beginning) of November |
+------------+--------------------------------------------------------------------+
| Q(E,S)-DEC | Quarter frequency, anchored at the (end, beginning) of December |
+------------+--------------------------------------------------------------------+
Finally, the following calendar aliases are supported.
+--------------------------------+---------------------------------------+
| Alias | Date type |
+================================+=======================================+
| standard, gregorian | ``cftime.DatetimeGregorian`` |
+--------------------------------+---------------------------------------+
| proleptic_gregorian | ``cftime.DatetimeProlepticGregorian`` |
+--------------------------------+---------------------------------------+
| noleap, 365_day | ``cftime.DatetimeNoLeap`` |
+--------------------------------+---------------------------------------+
| all_leap, 366_day | ``cftime.DatetimeAllLeap`` |
+--------------------------------+---------------------------------------+
| 360_day | ``cftime.Datetime360Day`` |
+--------------------------------+---------------------------------------+
| julian | ``cftime.DatetimeJulian`` |
+--------------------------------+---------------------------------------+
Examples
--------
This function returns a ``CFTimeIndex``, populated with ``cftime.datetime``
objects associated with the specified calendar type, e.g.
>>> xr.cftime_range(start="2000", periods=6, freq="2MS", calendar="noleap")
CFTimeIndex([2000-01-01 00:00:00, 2000-03-01 00:00:00, 2000-05-01 00:00:00,
2000-07-01 00:00:00, 2000-09-01 00:00:00, 2000-11-01 00:00:00],
dtype='object', length=6, calendar='noleap', freq='2MS')
As in the standard pandas function, three of the ``start``, ``end``,
``periods``, or ``freq`` arguments must be specified at a given time, with
the other set to ``None``. See the `pandas documentation
<https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.date_range.html>`_
for more examples of the behavior of ``date_range`` with each of the
parameters.
See Also
--------
pandas.date_range
"""
if freq is None and any(arg is None for arg in [periods, start, end]):
freq = "D"
# Adapted from pandas.core.indexes.datetimes._generate_range.
if count_not_none(start, end, periods, freq) != 3:
raise ValueError(
"Of the arguments 'start', 'end', 'periods', and 'freq', three "
"must be specified at a time."
)
if start is not None:
start = to_cftime_datetime(start, calendar)
start = _maybe_normalize_date(start, normalize)
if end is not None:
end = to_cftime_datetime(end, calendar)
end = _maybe_normalize_date(end, normalize)
if freq is None:
dates = _generate_linear_range(start, end, periods)
else:
offset = to_offset(freq)
dates = np.array(list(_generate_range(start, end, periods, offset)))
inclusive = _infer_inclusive(closed, inclusive)
if inclusive == "neither":
left_closed = False
right_closed = False
elif inclusive == "left":
left_closed = True
right_closed = False
elif inclusive == "right":
left_closed = False
right_closed = True
elif inclusive == "both":
left_closed = True
right_closed = True
else:
raise ValueError(
f"Argument `inclusive` must be either 'both', 'neither', "
f"'left', 'right', or None. Got {inclusive}."
)
if not left_closed and len(dates) and start is not None and dates[0] == start:
dates = dates[1:]
if not right_closed and len(dates) and end is not None and dates[-1] == end:
dates = dates[:-1]
return CFTimeIndex(dates, name=name)
| cftime_range |
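A short added example of the ``inclusive`` argument documented above (it assumes cftime is installed):

import xarray as xr

idx = xr.cftime_range(
    start="2000-01-01", end="2000-01-05", freq="D", inclusive="left", calendar="noleap"
)
# idx holds 2000-01-01 through 2000-01-04; the right bound is excluded.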
xarray | 24 | xarray/core/datatree_mapping.py | def check_isomorphic(
a: DataTree,
b: DataTree,
require_names_equal: bool = False,
check_from_root: bool = True,
):
"""
Check that two trees have the same structure, raising an error if not.
Does not compare the actual data in the nodes.
By default this function only checks that subtrees are isomorphic, not the entire tree above (if it exists).
Can instead optionally check the entire trees starting from the root, which will ensure all ancestors are also compared.
Can optionally also check that corresponding nodes have the same name.
Parameters
----------
a : DataTree
b : DataTree
require_names_equal : Bool
Whether or not to also check that each node has the same name as its counterpart.
check_from_root : Bool
Whether or not to first traverse to the root of the trees before checking for isomorphism.
If a & b have no parents then this has no effect.
Raises
------
TypeError
If either a or b are not tree objects.
TreeIsomorphismError
If a and b are tree objects, but are not isomorphic to one another.
Also optionally raised if their structure is isomorphic, but the names of any two
respective nodes are not equal.
"""
| /usr/src/app/target_test_cases/failed_tests_check_isomorphic.txt | def check_isomorphic(
a: DataTree,
b: DataTree,
require_names_equal: bool = False,
check_from_root: bool = True,
):
"""
Check that two trees have the same structure, raising an error if not.
Does not compare the actual data in the nodes.
By default this function only checks that subtrees are isomorphic, not the entire tree above (if it exists).
Can instead optionally check the entire trees starting from the root, which will ensure all ancestors are also compared.
Can optionally also check that corresponding nodes have the same name.
Parameters
----------
a : DataTree
b : DataTree
require_names_equal : Bool
Whether or not to also check that each node has the same name as its counterpart.
check_from_root : Bool
Whether or not to first traverse to the root of the trees before checking for isomorphism.
If a & b have no parents then this has no effect.
Raises
------
TypeError
If either a or b are not tree objects.
TreeIsomorphismError
If a and b are tree objects, but are not isomorphic to one another.
Also optionally raised if their structure is isomorphic, but the names of any two
respective nodes are not equal.
"""
if not isinstance(a, TreeNode):
raise TypeError(f"Argument `a` is not a tree, it is of type {type(a)}")
if not isinstance(b, TreeNode):
raise TypeError(f"Argument `b` is not a tree, it is of type {type(b)}")
if check_from_root:
a = a.root
b = b.root
diff = diff_treestructure(a, b, require_names_equal=require_names_equal)
if diff:
raise TreeIsomorphismError("DataTree objects are not isomorphic:\n" + diff)
| check_isomorphic |
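A rough usage sketch of `check_isomorphic` follows; the import paths are assumptions, since `DataTree` has lived both in the standalone `datatree` package and inside xarray depending on the version:
import xarray as xr
from xarray.core.datatree import DataTree  # assumed location; varies by xarray version
from xarray.core.datatree_mapping import TreeIsomorphismError, check_isomorphic

# Two trees with one child each: same structure, different node names.
a = DataTree.from_dict({"/child": xr.Dataset({"v": ("x", [1, 2])})})
b = DataTree.from_dict({"/other": xr.Dataset({"v": ("x", [3, 4])})})

check_isomorphic(a, b)  # passes: the structures match
try:
    check_isomorphic(a, b, require_names_equal=True)
except TreeIsomorphismError as err:
    print(err)  # node names differ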
xarray | 25 | xarray/core/combine.py | def combine_by_coords(
data_objects: Iterable[Dataset | DataArray] = [],
compat: CompatOptions = "no_conflicts",
data_vars: Literal["all", "minimal", "different"] | list[str] = "all",
coords: str = "different",
fill_value: object = dtypes.NA,
join: JoinOptions = "outer",
combine_attrs: CombineAttrsOptions = "no_conflicts",
) -> Dataset | DataArray:
"""
Attempt to auto-magically combine the given datasets (or data arrays)
into one by using dimension coordinates.
This function attempts to combine a group of datasets along any number of
dimensions into a single entity by inspecting coords and metadata and using
a combination of concat and merge.
Will attempt to order the datasets such that the values in their dimension
coordinates are monotonic along all dimensions. If it cannot determine the
order in which to concatenate the datasets, it will raise a ValueError.
Non-coordinate dimensions will be ignored, as will any coordinate
dimensions which do not vary between each dataset.
Aligns coordinates, but different variables on datasets can cause it
to fail under some scenarios. In complex cases, you may need to clean up
your data and use concat/merge explicitly (also see `combine_nested`).
Works well if, for example, you have N years of data and M data variables,
and each combination of a distinct time period and set of data variables is
saved as its own dataset. Also useful if you have a simulation which is
parallelized in multiple dimensions, but has global coordinates saved in
each file specifying the positions of points within the global domain.
Parameters
----------
data_objects : Iterable of Datasets or DataArrays
Data objects to combine.
compat : {"identical", "equals", "broadcast_equals", "no_conflicts", "override"}, optional
String indicating how to compare variables of the same name for
potential conflicts:
- "broadcast_equals": all values must be equal when variables are
broadcast against each other to ensure common dimensions.
- "equals": all values and dimensions must be the same.
- "identical": all values, dimensions and attributes must be the
same.
- "no_conflicts": only values which are not null in both datasets
must be equal. The returned dataset then contains the combination
of all non-null values.
- "override": skip comparing and pick variable from first dataset
data_vars : {"minimal", "different", "all" or list of str}, optional
These data variables will be concatenated together:
- "minimal": Only data variables in which the dimension already
appears are included.
- "different": Data variables which are not equal (ignoring
attributes) across all datasets are also concatenated (as well as
all for which dimension already appears). Beware: this option may
load the data payload of data variables into memory if they are not
already loaded.
- "all": All data variables will be concatenated.
- list of str: The listed data variables will be concatenated, in
addition to the "minimal" data variables.
If objects are DataArrays, `data_vars` must be "all".
coords : {"minimal", "different", "all"} or list of str, optional
As per the "data_vars" kwarg, but for coordinate variables.
fill_value : scalar or dict-like, optional
Value to use for newly missing values. If a dict-like, maps
variable names to fill values. Use a data array's name to
refer to its values. If None, raises a ValueError if
the passed Datasets do not create a complete hypercube.
join : {"outer", "inner", "left", "right", "exact"}, optional
String indicating how to combine differing indexes in objects
- "outer": use the union of object indexes
- "inner": use the intersection of object indexes
- "left": use indexes from the first object with each dimension
- "right": use indexes from the last object with each dimension
- "exact": instead of aligning, raise `ValueError` when indexes to be
aligned are not equal
- "override": if indexes are of same size, rewrite indexes to be
those of the first object with that dimension. Indexes for the same
dimension must have the same size in all objects.
combine_attrs : {"drop", "identical", "no_conflicts", "drop_conflicts", \
"override"} or callable, default: "no_conflicts"
A callable or a string indicating how to combine attrs of the objects being
merged:
- "drop": empty attrs on returned Dataset.
- "identical": all attrs must be the same on every object.
- "no_conflicts": attrs from all objects are combined, any that have
the same name must also have the same value.
- "drop_conflicts": attrs from all objects are combined, any that have
the same name but different values are dropped.
- "override": skip comparing and copy attrs from the first dataset to
the result.
If a callable, it must expect a sequence of ``attrs`` dicts and a context object
as its only parameters.
Returns
-------
combined : xarray.Dataset or xarray.DataArray
Will return a Dataset unless all the inputs are unnamed DataArrays, in which case a
DataArray will be returned.
See also
--------
concat
merge
combine_nested
Examples
--------
Combining two datasets using their common dimension coordinates. Notice
they are concatenated based on the values in their dimension coordinates,
not on their position in the list passed to `combine_by_coords`.
>>> x1 = xr.Dataset(
... {
... "temperature": (("y", "x"), 20 * np.random.rand(6).reshape(2, 3)),
... "precipitation": (("y", "x"), np.random.rand(6).reshape(2, 3)),
... },
... coords={"y": [0, 1], "x": [10, 20, 30]},
... )
>>> x2 = xr.Dataset(
... {
... "temperature": (("y", "x"), 20 * np.random.rand(6).reshape(2, 3)),
... "precipitation": (("y", "x"), np.random.rand(6).reshape(2, 3)),
... },
... coords={"y": [2, 3], "x": [10, 20, 30]},
... )
>>> x3 = xr.Dataset(
... {
... "temperature": (("y", "x"), 20 * np.random.rand(6).reshape(2, 3)),
... "precipitation": (("y", "x"), np.random.rand(6).reshape(2, 3)),
... },
... coords={"y": [2, 3], "x": [40, 50, 60]},
... )
>>> x1
<xarray.Dataset> Size: 136B
Dimensions: (y: 2, x: 3)
Coordinates:
* y (y) int64 16B 0 1
* x (x) int64 24B 10 20 30
Data variables:
temperature (y, x) float64 48B 10.98 14.3 12.06 10.9 8.473 12.92
precipitation (y, x) float64 48B 0.4376 0.8918 0.9637 0.3834 0.7917 0.5289
>>> x2
<xarray.Dataset> Size: 136B
Dimensions: (y: 2, x: 3)
Coordinates:
* y (y) int64 16B 2 3
* x (x) int64 24B 10 20 30
Data variables:
temperature (y, x) float64 48B 11.36 18.51 1.421 1.743 0.4044 16.65
precipitation (y, x) float64 48B 0.7782 0.87 0.9786 0.7992 0.4615 0.7805
>>> x3
<xarray.Dataset> Size: 136B
Dimensions: (y: 2, x: 3)
Coordinates:
* y (y) int64 16B 2 3
* x (x) int64 24B 40 50 60
Data variables:
temperature (y, x) float64 48B 2.365 12.8 2.867 18.89 10.44 8.293
precipitation (y, x) float64 48B 0.2646 0.7742 0.4562 0.5684 0.01879 0.6176
>>> xr.combine_by_coords([x2, x1])
<xarray.Dataset> Size: 248B
Dimensions: (y: 4, x: 3)
Coordinates:
* y (y) int64 32B 0 1 2 3
* x (x) int64 24B 10 20 30
Data variables:
temperature (y, x) float64 96B 10.98 14.3 12.06 ... 1.743 0.4044 16.65
precipitation (y, x) float64 96B 0.4376 0.8918 0.9637 ... 0.4615 0.7805
>>> xr.combine_by_coords([x3, x1])
<xarray.Dataset> Size: 464B
Dimensions: (y: 4, x: 6)
Coordinates:
* y (y) int64 32B 0 1 2 3
* x (x) int64 48B 10 20 30 40 50 60
Data variables:
temperature (y, x) float64 192B 10.98 14.3 12.06 ... 18.89 10.44 8.293
precipitation (y, x) float64 192B 0.4376 0.8918 0.9637 ... 0.01879 0.6176
>>> xr.combine_by_coords([x3, x1], join="override")
<xarray.Dataset> Size: 256B
Dimensions: (y: 2, x: 6)
Coordinates:
* y (y) int64 16B 0 1
* x (x) int64 48B 10 20 30 40 50 60
Data variables:
temperature (y, x) float64 96B 10.98 14.3 12.06 ... 18.89 10.44 8.293
precipitation (y, x) float64 96B 0.4376 0.8918 0.9637 ... 0.01879 0.6176
>>> xr.combine_by_coords([x1, x2, x3])
<xarray.Dataset> Size: 464B
Dimensions: (y: 4, x: 6)
Coordinates:
* y (y) int64 32B 0 1 2 3
* x (x) int64 48B 10 20 30 40 50 60
Data variables:
temperature (y, x) float64 192B 10.98 14.3 12.06 ... 18.89 10.44 8.293
precipitation (y, x) float64 192B 0.4376 0.8918 0.9637 ... 0.01879 0.6176
You can also combine DataArray objects, but the behaviour will differ depending on
whether or not the DataArrays are named. If all DataArrays are named then they will
be promoted to Datasets before combining, and then the resultant Dataset will be
returned, e.g.
>>> named_da1 = xr.DataArray(
... name="a", data=[1.0, 2.0], coords={"x": [0, 1]}, dims="x"
... )
>>> named_da1
<xarray.DataArray 'a' (x: 2)> Size: 16B
array([1., 2.])
Coordinates:
* x (x) int64 16B 0 1
>>> named_da2 = xr.DataArray(
... name="a", data=[3.0, 4.0], coords={"x": [2, 3]}, dims="x"
... )
>>> named_da2
<xarray.DataArray 'a' (x: 2)> Size: 16B
array([3., 4.])
Coordinates:
* x (x) int64 16B 2 3
>>> xr.combine_by_coords([named_da1, named_da2])
<xarray.Dataset> Size: 64B
Dimensions: (x: 4)
Coordinates:
* x (x) int64 32B 0 1 2 3
Data variables:
a (x) float64 32B 1.0 2.0 3.0 4.0
If all the DataArrays are unnamed, a single DataArray will be returned, e.g.
>>> unnamed_da1 = xr.DataArray(data=[1.0, 2.0], coords={"x": [0, 1]}, dims="x")
>>> unnamed_da2 = xr.DataArray(data=[3.0, 4.0], coords={"x": [2, 3]}, dims="x")
>>> xr.combine_by_coords([unnamed_da1, unnamed_da2])
<xarray.DataArray (x: 4)> Size: 32B
array([1., 2., 3., 4.])
Coordinates:
* x (x) int64 32B 0 1 2 3
Finally, if you attempt to combine a mix of unnamed DataArrays with either named
DataArrays or Datasets, a ValueError will be raised (as this is an ambiguous operation).
"""
| /usr/src/app/target_test_cases/failed_tests_combine_by_coords.txt | def combine_by_coords(
data_objects: Iterable[Dataset | DataArray] = [],
compat: CompatOptions = "no_conflicts",
data_vars: Literal["all", "minimal", "different"] | list[str] = "all",
coords: str = "different",
fill_value: object = dtypes.NA,
join: JoinOptions = "outer",
combine_attrs: CombineAttrsOptions = "no_conflicts",
) -> Dataset | DataArray:
"""
Attempt to auto-magically combine the given datasets (or data arrays)
into one by using dimension coordinates.
This function attempts to combine a group of datasets along any number of
dimensions into a single entity by inspecting coords and metadata and using
a combination of concat and merge.
Will attempt to order the datasets such that the values in their dimension
coordinates are monotonic along all dimensions. If it cannot determine the
order in which to concatenate the datasets, it will raise a ValueError.
Non-coordinate dimensions will be ignored, as will any coordinate
dimensions which do not vary between each dataset.
Aligns coordinates, but different variables on datasets can cause it
to fail under some scenarios. In complex cases, you may need to clean up
your data and use concat/merge explicitly (also see `combine_nested`).
Works well if, for example, you have N years of data and M data variables,
and each combination of a distinct time period and set of data variables is
saved as its own dataset. Also useful if you have a simulation which is
parallelized in multiple dimensions, but has global coordinates saved in
each file specifying the positions of points within the global domain.
Parameters
----------
data_objects : Iterable of Datasets or DataArrays
Data objects to combine.
compat : {"identical", "equals", "broadcast_equals", "no_conflicts", "override"}, optional
String indicating how to compare variables of the same name for
potential conflicts:
- "broadcast_equals": all values must be equal when variables are
broadcast against each other to ensure common dimensions.
- "equals": all values and dimensions must be the same.
- "identical": all values, dimensions and attributes must be the
same.
- "no_conflicts": only values which are not null in both datasets
must be equal. The returned dataset then contains the combination
of all non-null values.
- "override": skip comparing and pick variable from first dataset
data_vars : {"minimal", "different", "all" or list of str}, optional
These data variables will be concatenated together:
- "minimal": Only data variables in which the dimension already
appears are included.
- "different": Data variables which are not equal (ignoring
attributes) across all datasets are also concatenated (as well as
all for which dimension already appears). Beware: this option may
load the data payload of data variables into memory if they are not
already loaded.
- "all": All data variables will be concatenated.
- list of str: The listed data variables will be concatenated, in
addition to the "minimal" data variables.
If objects are DataArrays, `data_vars` must be "all".
coords : {"minimal", "different", "all"} or list of str, optional
As per the "data_vars" kwarg, but for coordinate variables.
fill_value : scalar or dict-like, optional
Value to use for newly missing values. If a dict-like, maps
variable names to fill values. Use a data array's name to
refer to its values. If None, raises a ValueError if
the passed Datasets do not create a complete hypercube.
join : {"outer", "inner", "left", "right", "exact"}, optional
String indicating how to combine differing indexes in objects
- "outer": use the union of object indexes
- "inner": use the intersection of object indexes
- "left": use indexes from the first object with each dimension
- "right": use indexes from the last object with each dimension
- "exact": instead of aligning, raise `ValueError` when indexes to be
aligned are not equal
- "override": if indexes are of same size, rewrite indexes to be
those of the first object with that dimension. Indexes for the same
dimension must have the same size in all objects.
combine_attrs : {"drop", "identical", "no_conflicts", "drop_conflicts", \
"override"} or callable, default: "no_conflicts"
A callable or a string indicating how to combine attrs of the objects being
merged:
- "drop": empty attrs on returned Dataset.
- "identical": all attrs must be the same on every object.
- "no_conflicts": attrs from all objects are combined, any that have
the same name must also have the same value.
- "drop_conflicts": attrs from all objects are combined, any that have
the same name but different values are dropped.
- "override": skip comparing and copy attrs from the first dataset to
the result.
If a callable, it must expect a sequence of ``attrs`` dicts and a context object
as its only parameters.
Returns
-------
combined : xarray.Dataset or xarray.DataArray
Will return a Dataset unless all the inputs are unnamed DataArrays, in which case a
DataArray will be returned.
See also
--------
concat
merge
combine_nested
Examples
--------
Combining two datasets using their common dimension coordinates. Notice
they are concatenated based on the values in their dimension coordinates,
not on their position in the list passed to `combine_by_coords`.
>>> x1 = xr.Dataset(
... {
... "temperature": (("y", "x"), 20 * np.random.rand(6).reshape(2, 3)),
... "precipitation": (("y", "x"), np.random.rand(6).reshape(2, 3)),
... },
... coords={"y": [0, 1], "x": [10, 20, 30]},
... )
>>> x2 = xr.Dataset(
... {
... "temperature": (("y", "x"), 20 * np.random.rand(6).reshape(2, 3)),
... "precipitation": (("y", "x"), np.random.rand(6).reshape(2, 3)),
... },
... coords={"y": [2, 3], "x": [10, 20, 30]},
... )
>>> x3 = xr.Dataset(
... {
... "temperature": (("y", "x"), 20 * np.random.rand(6).reshape(2, 3)),
... "precipitation": (("y", "x"), np.random.rand(6).reshape(2, 3)),
... },
... coords={"y": [2, 3], "x": [40, 50, 60]},
... )
>>> x1
<xarray.Dataset> Size: 136B
Dimensions: (y: 2, x: 3)
Coordinates:
* y (y) int64 16B 0 1
* x (x) int64 24B 10 20 30
Data variables:
temperature (y, x) float64 48B 10.98 14.3 12.06 10.9 8.473 12.92
precipitation (y, x) float64 48B 0.4376 0.8918 0.9637 0.3834 0.7917 0.5289
>>> x2
<xarray.Dataset> Size: 136B
Dimensions: (y: 2, x: 3)
Coordinates:
* y (y) int64 16B 2 3
* x (x) int64 24B 10 20 30
Data variables:
temperature (y, x) float64 48B 11.36 18.51 1.421 1.743 0.4044 16.65
precipitation (y, x) float64 48B 0.7782 0.87 0.9786 0.7992 0.4615 0.7805
>>> x3
<xarray.Dataset> Size: 136B
Dimensions: (y: 2, x: 3)
Coordinates:
* y (y) int64 16B 2 3
* x (x) int64 24B 40 50 60
Data variables:
temperature (y, x) float64 48B 2.365 12.8 2.867 18.89 10.44 8.293
precipitation (y, x) float64 48B 0.2646 0.7742 0.4562 0.5684 0.01879 0.6176
>>> xr.combine_by_coords([x2, x1])
<xarray.Dataset> Size: 248B
Dimensions: (y: 4, x: 3)
Coordinates:
* y (y) int64 32B 0 1 2 3
* x (x) int64 24B 10 20 30
Data variables:
temperature (y, x) float64 96B 10.98 14.3 12.06 ... 1.743 0.4044 16.65
precipitation (y, x) float64 96B 0.4376 0.8918 0.9637 ... 0.4615 0.7805
>>> xr.combine_by_coords([x3, x1])
<xarray.Dataset> Size: 464B
Dimensions: (y: 4, x: 6)
Coordinates:
* y (y) int64 32B 0 1 2 3
* x (x) int64 48B 10 20 30 40 50 60
Data variables:
temperature (y, x) float64 192B 10.98 14.3 12.06 ... 18.89 10.44 8.293
precipitation (y, x) float64 192B 0.4376 0.8918 0.9637 ... 0.01879 0.6176
>>> xr.combine_by_coords([x3, x1], join="override")
<xarray.Dataset> Size: 256B
Dimensions: (y: 2, x: 6)
Coordinates:
* y (y) int64 16B 0 1
* x (x) int64 48B 10 20 30 40 50 60
Data variables:
temperature (y, x) float64 96B 10.98 14.3 12.06 ... 18.89 10.44 8.293
precipitation (y, x) float64 96B 0.4376 0.8918 0.9637 ... 0.01879 0.6176
>>> xr.combine_by_coords([x1, x2, x3])
<xarray.Dataset> Size: 464B
Dimensions: (y: 4, x: 6)
Coordinates:
* y (y) int64 32B 0 1 2 3
* x (x) int64 48B 10 20 30 40 50 60
Data variables:
temperature (y, x) float64 192B 10.98 14.3 12.06 ... 18.89 10.44 8.293
precipitation (y, x) float64 192B 0.4376 0.8918 0.9637 ... 0.01879 0.6176
You can also combine DataArray objects, but the behaviour will differ depending on
whether or not the DataArrays are named. If all DataArrays are named then they will
be promoted to Datasets before combining, and then the resultant Dataset will be
returned, e.g.
>>> named_da1 = xr.DataArray(
... name="a", data=[1.0, 2.0], coords={"x": [0, 1]}, dims="x"
... )
>>> named_da1
<xarray.DataArray 'a' (x: 2)> Size: 16B
array([1., 2.])
Coordinates:
* x (x) int64 16B 0 1
>>> named_da2 = xr.DataArray(
... name="a", data=[3.0, 4.0], coords={"x": [2, 3]}, dims="x"
... )
>>> named_da2
<xarray.DataArray 'a' (x: 2)> Size: 16B
array([3., 4.])
Coordinates:
* x (x) int64 16B 2 3
>>> xr.combine_by_coords([named_da1, named_da2])
<xarray.Dataset> Size: 64B
Dimensions: (x: 4)
Coordinates:
* x (x) int64 32B 0 1 2 3
Data variables:
a (x) float64 32B 1.0 2.0 3.0 4.0
If all the DataArrays are unnamed, a single DataArray will be returned, e.g.
>>> unnamed_da1 = xr.DataArray(data=[1.0, 2.0], coords={"x": [0, 1]}, dims="x")
>>> unnamed_da2 = xr.DataArray(data=[3.0, 4.0], coords={"x": [2, 3]}, dims="x")
>>> xr.combine_by_coords([unnamed_da1, unnamed_da2])
<xarray.DataArray (x: 4)> Size: 32B
array([1., 2., 3., 4.])
Coordinates:
* x (x) int64 32B 0 1 2 3
Finally, if you attempt to combine a mix of unnamed DataArrays with either named
DataArrays or Datasets, a ValueError will be raised (as this is an ambiguous operation).
"""
if not data_objects:
return Dataset()
objs_are_unnamed_dataarrays = [
isinstance(data_object, DataArray) and data_object.name is None
for data_object in data_objects
]
if any(objs_are_unnamed_dataarrays):
if all(objs_are_unnamed_dataarrays):
# Combine into a single larger DataArray
temp_datasets = [
unnamed_dataarray._to_temp_dataset()
for unnamed_dataarray in data_objects
]
combined_temp_dataset = _combine_single_variable_hypercube(
temp_datasets,
fill_value=fill_value,
data_vars=data_vars,
coords=coords,
compat=compat,
join=join,
combine_attrs=combine_attrs,
)
return DataArray()._from_temp_dataset(combined_temp_dataset)
else:
# Must be a mix of unnamed dataarrays with either named dataarrays or with datasets
# Can't combine these as we wouldn't know whether to merge or concatenate the arrays
raise ValueError(
"Can't automatically combine unnamed DataArrays with either named DataArrays or Datasets."
)
else:
# Promote any named DataArrays to single-variable Datasets to simplify combining
data_objects = [
obj.to_dataset() if isinstance(obj, DataArray) else obj
for obj in data_objects
]
# Group by data vars
sorted_datasets = sorted(data_objects, key=vars_as_keys)
grouped_by_vars = itertools.groupby(sorted_datasets, key=vars_as_keys)
# Perform the multidimensional combine on each group of data variables
# before merging back together
concatenated_grouped_by_data_vars = tuple(
_combine_single_variable_hypercube(
tuple(datasets_with_same_vars),
fill_value=fill_value,
data_vars=data_vars,
coords=coords,
compat=compat,
join=join,
combine_attrs=combine_attrs,
)
for vars, datasets_with_same_vars in grouped_by_vars
)
return merge(
concatenated_grouped_by_data_vars,
compat=compat,
fill_value=fill_value,
join=join,
combine_attrs=combine_attrs,
)
| combine_by_coords |
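A small sketch of the error branch in the implementation above: mixing an unnamed DataArray with a Dataset is ambiguous (merge vs. concat), so `combine_by_coords` refuses it.
import xarray as xr

unnamed = xr.DataArray([1.0, 2.0], coords={"x": [0, 1]}, dims="x")
ds = xr.Dataset({"a": ("x", [3.0, 4.0])}, coords={"x": [2, 3]})

try:
    xr.combine_by_coords([unnamed, ds])
except ValueError as err:
    # "Can't automatically combine unnamed DataArrays with either named
    # DataArrays or Datasets."
    print(err)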
xarray | 26 | xarray/core/combine.py | def combine_nested(
datasets: DATASET_HYPERCUBE,
concat_dim: str | DataArray | None | Sequence[str | DataArray | pd.Index | None],
compat: str = "no_conflicts",
data_vars: str = "all",
coords: str = "different",
fill_value: object = dtypes.NA,
join: JoinOptions = "outer",
combine_attrs: CombineAttrsOptions = "drop",
) -> Dataset:
"""
Explicitly combine an N-dimensional grid of datasets into one by using a
succession of concat and merge operations along each dimension of the grid.
Does not sort the supplied datasets under any circumstances, so the
datasets must be passed in the order you wish them to be concatenated. It
does align coordinates, but different variables on datasets can cause it to
fail under some scenarios. In complex cases, you may need to clean up your
data and use concat/merge explicitly.
To concatenate along multiple dimensions the datasets must be passed as a
nested list-of-lists, with a depth equal to the length of ``concat_dim``.
``combine_nested`` will concatenate along the top-level list first.
Useful for combining datasets from a set of nested directories, or for
collecting the output of a simulation parallelized along multiple
dimensions.
Parameters
----------
datasets : list or nested list of Dataset
Dataset objects to combine.
If concatenation or merging along more than one dimension is desired,
then datasets must be supplied in a nested list-of-lists.
concat_dim : str, or list of str, DataArray, Index or None
Dimensions along which to concatenate variables, as used by
:py:func:`xarray.concat`.
Set ``concat_dim=[..., None, ...]`` explicitly to disable concatenation
and merge instead along a particular dimension.
The position of ``None`` in the list specifies the dimension of the
nested-list input along which to merge.
Must be the same length as the depth of the list passed to
``datasets``.
compat : {"identical", "equals", "broadcast_equals", \
"no_conflicts", "override"}, optional
String indicating how to compare variables of the same name for
potential merge conflicts:
- "broadcast_equals": all values must be equal when variables are
broadcast against each other to ensure common dimensions.
- "equals": all values and dimensions must be the same.
- "identical": all values, dimensions and attributes must be the
same.
- "no_conflicts": only values which are not null in both datasets
must be equal. The returned dataset then contains the combination
of all non-null values.
- "override": skip comparing and pick variable from first dataset
data_vars : {"minimal", "different", "all" or list of str}, optional
Details are in the documentation of concat
coords : {"minimal", "different", "all" or list of str}, optional
Details are in the documentation of concat
fill_value : scalar or dict-like, optional
Value to use for newly missing values. If a dict-like, maps
variable names to fill values. Use a data array's name to
refer to its values.
join : {"outer", "inner", "left", "right", "exact"}, optional
String indicating how to combine differing indexes
(excluding concat_dim) in objects
- "outer": use the union of object indexes
- "inner": use the intersection of object indexes
- "left": use indexes from the first object with each dimension
- "right": use indexes from the last object with each dimension
- "exact": instead of aligning, raise `ValueError` when indexes to be
aligned are not equal
- "override": if indexes are of same size, rewrite indexes to be
those of the first object with that dimension. Indexes for the same
dimension must have the same size in all objects.
combine_attrs : {"drop", "identical", "no_conflicts", "drop_conflicts", \
"override"} or callable, default: "drop"
A callable or a string indicating how to combine attrs of the objects being
merged:
- "drop": empty attrs on returned Dataset.
- "identical": all attrs must be the same on every object.
- "no_conflicts": attrs from all objects are combined, any that have
the same name must also have the same value.
- "drop_conflicts": attrs from all objects are combined, any that have
the same name but different values are dropped.
- "override": skip comparing and copy attrs from the first dataset to
the result.
If a callable, it must expect a sequence of ``attrs`` dicts and a context object
as its only parameters.
Returns
-------
combined : xarray.Dataset
Examples
--------
A common task is collecting data from a parallelized simulation in which
each process wrote out to a separate file. A domain which was decomposed
into 4 parts, 2 each along both the x and y axes, requires organising the
datasets into a doubly-nested list, e.g:
>>> x1y1 = xr.Dataset(
... {
... "temperature": (("x", "y"), np.random.randn(2, 2)),
... "precipitation": (("x", "y"), np.random.randn(2, 2)),
... }
... )
>>> x1y1
<xarray.Dataset> Size: 64B
Dimensions: (x: 2, y: 2)
Dimensions without coordinates: x, y
Data variables:
temperature (x, y) float64 32B 1.764 0.4002 0.9787 2.241
precipitation (x, y) float64 32B 1.868 -0.9773 0.9501 -0.1514
>>> x1y2 = xr.Dataset(
... {
... "temperature": (("x", "y"), np.random.randn(2, 2)),
... "precipitation": (("x", "y"), np.random.randn(2, 2)),
... }
... )
>>> x2y1 = xr.Dataset(
... {
... "temperature": (("x", "y"), np.random.randn(2, 2)),
... "precipitation": (("x", "y"), np.random.randn(2, 2)),
... }
... )
>>> x2y2 = xr.Dataset(
... {
... "temperature": (("x", "y"), np.random.randn(2, 2)),
... "precipitation": (("x", "y"), np.random.randn(2, 2)),
... }
... )
>>> ds_grid = [[x1y1, x1y2], [x2y1, x2y2]]
>>> combined = xr.combine_nested(ds_grid, concat_dim=["x", "y"])
>>> combined
<xarray.Dataset> Size: 256B
Dimensions: (x: 4, y: 4)
Dimensions without coordinates: x, y
Data variables:
temperature (x, y) float64 128B 1.764 0.4002 -0.1032 ... 0.04576 -0.1872
precipitation (x, y) float64 128B 1.868 -0.9773 0.761 ... 0.1549 0.3782
``combine_nested`` can also be used to explicitly merge datasets with
different variables. For example if we have 4 datasets, which are divided
along two times, and contain two different variables, we can pass ``None``
to ``concat_dim`` to specify the dimension of the nested list over which
we wish to use ``merge`` instead of ``concat``:
>>> t1temp = xr.Dataset({"temperature": ("t", np.random.randn(5))})
>>> t1temp
<xarray.Dataset> Size: 40B
Dimensions: (t: 5)
Dimensions without coordinates: t
Data variables:
temperature (t) float64 40B -0.8878 -1.981 -0.3479 0.1563 1.23
>>> t1precip = xr.Dataset({"precipitation": ("t", np.random.randn(5))})
>>> t1precip
<xarray.Dataset> Size: 40B
Dimensions: (t: 5)
Dimensions without coordinates: t
Data variables:
precipitation (t) float64 40B 1.202 -0.3873 -0.3023 -1.049 -1.42
>>> t2temp = xr.Dataset({"temperature": ("t", np.random.randn(5))})
>>> t2precip = xr.Dataset({"precipitation": ("t", np.random.randn(5))})
>>> ds_grid = [[t1temp, t1precip], [t2temp, t2precip]]
>>> combined = xr.combine_nested(ds_grid, concat_dim=["t", None])
>>> combined
<xarray.Dataset> Size: 160B
Dimensions: (t: 10)
Dimensions without coordinates: t
Data variables:
temperature (t) float64 80B -0.8878 -1.981 -0.3479 ... -0.4381 -1.253
precipitation (t) float64 80B 1.202 -0.3873 -0.3023 ... -0.8955 0.3869
See also
--------
concat
merge
"""
| /usr/src/app/target_test_cases/failed_tests_combine_nested.txt | def combine_nested(
datasets: DATASET_HYPERCUBE,
concat_dim: str | DataArray | None | Sequence[str | DataArray | pd.Index | None],
compat: str = "no_conflicts",
data_vars: str = "all",
coords: str = "different",
fill_value: object = dtypes.NA,
join: JoinOptions = "outer",
combine_attrs: CombineAttrsOptions = "drop",
) -> Dataset:
"""
Explicitly combine an N-dimensional grid of datasets into one by using a
succession of concat and merge operations along each dimension of the grid.
Does not sort the supplied datasets under any circumstances, so the
datasets must be passed in the order you wish them to be concatenated. It
does align coordinates, but different variables on datasets can cause it to
fail under some scenarios. In complex cases, you may need to clean up your
data and use concat/merge explicitly.
To concatenate along multiple dimensions the datasets must be passed as a
nested list-of-lists, with a depth equal to the length of ``concat_dim``.
``combine_nested`` will concatenate along the top-level list first.
Useful for combining datasets from a set of nested directories, or for
collecting the output of a simulation parallelized along multiple
dimensions.
Parameters
----------
datasets : list or nested list of Dataset
Dataset objects to combine.
If concatenation or merging along more than one dimension is desired,
then datasets must be supplied in a nested list-of-lists.
concat_dim : str, or list of str, DataArray, Index or None
Dimensions along which to concatenate variables, as used by
:py:func:`xarray.concat`.
Set ``concat_dim=[..., None, ...]`` explicitly to disable concatenation
and merge instead along a particular dimension.
The position of ``None`` in the list specifies the dimension of the
nested-list input along which to merge.
Must be the same length as the depth of the list passed to
``datasets``.
compat : {"identical", "equals", "broadcast_equals", \
"no_conflicts", "override"}, optional
String indicating how to compare variables of the same name for
potential merge conflicts:
- "broadcast_equals": all values must be equal when variables are
broadcast against each other to ensure common dimensions.
- "equals": all values and dimensions must be the same.
- "identical": all values, dimensions and attributes must be the
same.
- "no_conflicts": only values which are not null in both datasets
must be equal. The returned dataset then contains the combination
of all non-null values.
- "override": skip comparing and pick variable from first dataset
data_vars : {"minimal", "different", "all" or list of str}, optional
Details are in the documentation of concat
coords : {"minimal", "different", "all" or list of str}, optional
Details are in the documentation of concat
fill_value : scalar or dict-like, optional
Value to use for newly missing values. If a dict-like, maps
variable names to fill values. Use a data array's name to
refer to its values.
join : {"outer", "inner", "left", "right", "exact"}, optional
String indicating how to combine differing indexes
(excluding concat_dim) in objects
- "outer": use the union of object indexes
- "inner": use the intersection of object indexes
- "left": use indexes from the first object with each dimension
- "right": use indexes from the last object with each dimension
- "exact": instead of aligning, raise `ValueError` when indexes to be
aligned are not equal
- "override": if indexes are of same size, rewrite indexes to be
those of the first object with that dimension. Indexes for the same
dimension must have the same size in all objects.
combine_attrs : {"drop", "identical", "no_conflicts", "drop_conflicts", \
"override"} or callable, default: "drop"
A callable or a string indicating how to combine attrs of the objects being
merged:
- "drop": empty attrs on returned Dataset.
- "identical": all attrs must be the same on every object.
- "no_conflicts": attrs from all objects are combined, any that have
the same name must also have the same value.
- "drop_conflicts": attrs from all objects are combined, any that have
the same name but different values are dropped.
- "override": skip comparing and copy attrs from the first dataset to
the result.
If a callable, it must expect a sequence of ``attrs`` dicts and a context object
as its only parameters.
Returns
-------
combined : xarray.Dataset
Examples
--------
A common task is collecting data from a parallelized simulation in which
each process wrote out to a separate file. A domain which was decomposed
into 4 parts, 2 each along both the x and y axes, requires organising the
datasets into a doubly-nested list, e.g:
>>> x1y1 = xr.Dataset(
... {
... "temperature": (("x", "y"), np.random.randn(2, 2)),
... "precipitation": (("x", "y"), np.random.randn(2, 2)),
... }
... )
>>> x1y1
<xarray.Dataset> Size: 64B
Dimensions: (x: 2, y: 2)
Dimensions without coordinates: x, y
Data variables:
temperature (x, y) float64 32B 1.764 0.4002 0.9787 2.241
precipitation (x, y) float64 32B 1.868 -0.9773 0.9501 -0.1514
>>> x1y2 = xr.Dataset(
... {
... "temperature": (("x", "y"), np.random.randn(2, 2)),
... "precipitation": (("x", "y"), np.random.randn(2, 2)),
... }
... )
>>> x2y1 = xr.Dataset(
... {
... "temperature": (("x", "y"), np.random.randn(2, 2)),
... "precipitation": (("x", "y"), np.random.randn(2, 2)),
... }
... )
>>> x2y2 = xr.Dataset(
... {
... "temperature": (("x", "y"), np.random.randn(2, 2)),
... "precipitation": (("x", "y"), np.random.randn(2, 2)),
... }
... )
>>> ds_grid = [[x1y1, x1y2], [x2y1, x2y2]]
>>> combined = xr.combine_nested(ds_grid, concat_dim=["x", "y"])
>>> combined
<xarray.Dataset> Size: 256B
Dimensions: (x: 4, y: 4)
Dimensions without coordinates: x, y
Data variables:
temperature (x, y) float64 128B 1.764 0.4002 -0.1032 ... 0.04576 -0.1872
precipitation (x, y) float64 128B 1.868 -0.9773 0.761 ... 0.1549 0.3782
``combine_nested`` can also be used to explicitly merge datasets with
different variables. For example if we have 4 datasets, which are divided
along two times, and contain two different variables, we can pass ``None``
to ``concat_dim`` to specify the dimension of the nested list over which
we wish to use ``merge`` instead of ``concat``:
>>> t1temp = xr.Dataset({"temperature": ("t", np.random.randn(5))})
>>> t1temp
<xarray.Dataset> Size: 40B
Dimensions: (t: 5)
Dimensions without coordinates: t
Data variables:
temperature (t) float64 40B -0.8878 -1.981 -0.3479 0.1563 1.23
>>> t1precip = xr.Dataset({"precipitation": ("t", np.random.randn(5))})
>>> t1precip
<xarray.Dataset> Size: 40B
Dimensions: (t: 5)
Dimensions without coordinates: t
Data variables:
precipitation (t) float64 40B 1.202 -0.3873 -0.3023 -1.049 -1.42
>>> t2temp = xr.Dataset({"temperature": ("t", np.random.randn(5))})
>>> t2precip = xr.Dataset({"precipitation": ("t", np.random.randn(5))})
>>> ds_grid = [[t1temp, t1precip], [t2temp, t2precip]]
>>> combined = xr.combine_nested(ds_grid, concat_dim=["t", None])
>>> combined
<xarray.Dataset> Size: 160B
Dimensions: (t: 10)
Dimensions without coordinates: t
Data variables:
temperature (t) float64 80B -0.8878 -1.981 -0.3479 ... -0.4381 -1.253
precipitation (t) float64 80B 1.202 -0.3873 -0.3023 ... -0.8955 0.3869
See also
--------
concat
merge
"""
mixed_datasets_and_arrays = any(
isinstance(obj, Dataset) for obj in iterate_nested(datasets)
) and any(
isinstance(obj, DataArray) and obj.name is None
for obj in iterate_nested(datasets)
)
if mixed_datasets_and_arrays:
raise ValueError("Can't combine datasets with unnamed arrays.")
if isinstance(concat_dim, str | DataArray) or concat_dim is None:
concat_dim = [concat_dim]
# The IDs argument tells _nested_combine that datasets aren't yet sorted
return _nested_combine(
datasets,
concat_dims=concat_dim,
compat=compat,
data_vars=data_vars,
coords=coords,
ids=False,
fill_value=fill_value,
join=join,
combine_attrs=combine_attrs,
)
| combine_nested |
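For the common one-dimensional case, `concat_dim` may be a single string; the `isinstance(concat_dim, str | DataArray)` branch above wraps it in a one-element list. A minimal sketch:
import numpy as np
import xarray as xr

t1 = xr.Dataset({"temperature": ("t", np.random.randn(3))})
t2 = xr.Dataset({"temperature": ("t", np.random.randn(3))})

# Flat list of datasets concatenated along a single dimension.
combined = xr.combine_nested([t1, t2], concat_dim="t")
print(combined.sizes["t"])  # 6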
xarray | 27 | xarray/core/rolling.py | def construct(
self,
window_dim: Hashable | Mapping[Any, Hashable] | None = None,
stride: int | Mapping[Any, int] = 1,
fill_value: Any = dtypes.NA,
keep_attrs: bool | None = None,
**window_dim_kwargs: Hashable,
) -> DataArray:
"""
Convert this rolling object to xr.DataArray,
where the window dimension is stacked as a new dimension
Parameters
----------
window_dim : Hashable or dict-like to Hashable, optional
A mapping from dimension name to the new window dimension names.
stride : int or mapping of int, default: 1
Size of stride for the rolling window.
fill_value : default: dtypes.NA
Filling value to match the dimension size.
keep_attrs : bool, default: None
If True, the attributes (``attrs``) will be copied from the original
object to the new one. If False, the new object will be returned
without attributes. If None uses the global default.
**window_dim_kwargs : Hashable, optional
The keyword arguments form of ``window_dim`` {dim: new_name, ...}.
Returns
-------
DataArray that is a view of the original array. The returned array is
not writeable.
Examples
--------
>>> da = xr.DataArray(np.arange(8).reshape(2, 4), dims=("a", "b"))
>>> rolling = da.rolling(b=3)
>>> rolling.construct("window_dim")
<xarray.DataArray (a: 2, b: 4, window_dim: 3)> Size: 192B
array([[[nan, nan, 0.],
[nan, 0., 1.],
[ 0., 1., 2.],
[ 1., 2., 3.]],
<BLANKLINE>
[[nan, nan, 4.],
[nan, 4., 5.],
[ 4., 5., 6.],
[ 5., 6., 7.]]])
Dimensions without coordinates: a, b, window_dim
>>> rolling = da.rolling(b=3, center=True)
>>> rolling.construct("window_dim")
<xarray.DataArray (a: 2, b: 4, window_dim: 3)> Size: 192B
array([[[nan, 0., 1.],
[ 0., 1., 2.],
[ 1., 2., 3.],
[ 2., 3., nan]],
<BLANKLINE>
[[nan, 4., 5.],
[ 4., 5., 6.],
[ 5., 6., 7.],
[ 6., 7., nan]]])
Dimensions without coordinates: a, b, window_dim
"""
| /usr/src/app/target_test_cases/failed_tests_construct.txt | def construct(
self,
window_dim: Hashable | Mapping[Any, Hashable] | None = None,
stride: int | Mapping[Any, int] = 1,
fill_value: Any = dtypes.NA,
keep_attrs: bool | None = None,
**window_dim_kwargs: Hashable,
) -> DataArray:
"""
Convert this rolling object to xr.DataArray,
where the window dimension is stacked as a new dimension
Parameters
----------
window_dim : Hashable or dict-like to Hashable, optional
A mapping from dimension name to the new window dimension names.
stride : int or mapping of int, default: 1
Size of stride for the rolling window.
fill_value : default: dtypes.NA
Filling value to match the dimension size.
keep_attrs : bool, default: None
If True, the attributes (``attrs``) will be copied from the original
object to the new one. If False, the new object will be returned
without attributes. If None uses the global default.
**window_dim_kwargs : Hashable, optional
The keyword arguments form of ``window_dim`` {dim: new_name, ...}.
Returns
-------
DataArray that is a view of the original array. The returned array is
not writeable.
Examples
--------
>>> da = xr.DataArray(np.arange(8).reshape(2, 4), dims=("a", "b"))
>>> rolling = da.rolling(b=3)
>>> rolling.construct("window_dim")
<xarray.DataArray (a: 2, b: 4, window_dim: 3)> Size: 192B
array([[[nan, nan, 0.],
[nan, 0., 1.],
[ 0., 1., 2.],
[ 1., 2., 3.]],
<BLANKLINE>
[[nan, nan, 4.],
[nan, 4., 5.],
[ 4., 5., 6.],
[ 5., 6., 7.]]])
Dimensions without coordinates: a, b, window_dim
>>> rolling = da.rolling(b=3, center=True)
>>> rolling.construct("window_dim")
<xarray.DataArray (a: 2, b: 4, window_dim: 3)> Size: 192B
array([[[nan, 0., 1.],
[ 0., 1., 2.],
[ 1., 2., 3.],
[ 2., 3., nan]],
<BLANKLINE>
[[nan, 4., 5.],
[ 4., 5., 6.],
[ 5., 6., 7.],
[ 6., 7., nan]]])
Dimensions without coordinates: a, b, window_dim
"""
return self._construct(
self.obj,
window_dim=window_dim,
stride=stride,
fill_value=fill_value,
keep_attrs=keep_attrs,
**window_dim_kwargs,
)
| construct |
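Because `construct` exposes the rolling window as an ordinary dimension, reductions over that dimension reproduce rolling aggregations, and `stride` subsamples the window positions. A short sketch:
import numpy as np
import xarray as xr

da = xr.DataArray(np.arange(8.0).reshape(2, 4), dims=("a", "b"))

# Every second window along "b", then a mean over the window dimension
# (the NaN padding at the edges is skipped by mean's default skipna=True).
windows = da.rolling(b=3).construct("window", stride=2)
print(windows.mean("window"))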
xarray | 28 | xarray/coding/calendar_ops.py | def convert_calendar(
obj,
calendar,
dim="time",
align_on=None,
missing=None,
use_cftime=None,
):
"""Transform a time-indexed Dataset or DataArray to one that uses another calendar.
This function only converts the individual timestamps; it does not modify any
data except in dropping invalid/surplus dates, or inserting values for missing dates.
If the source and target calendars are both from a standard type, only the
type of the time array is modified. When converting from a calendar with a
leap year to a calendar without a leap year, the 29th of February will
be removed from the array. In the other direction the 29th of February will
be missing in the output, unless `missing` is specified, in which case that
value is inserted. For conversions involving the `360_day` calendar, see Notes.
This method is safe to use with sub-daily data as it doesn't touch the time
part of the timestamps.
Parameters
----------
obj : DataArray or Dataset
Input DataArray or Dataset with a time coordinate of a valid dtype
(:py:class:`numpy.datetime64` or :py:class:`cftime.datetime`).
calendar : str
The target calendar name.
dim : str
Name of the time coordinate in the input DataArray or Dataset.
align_on : {None, 'date', 'year', 'random'}
Must be specified when either the source or target is a `"360_day"`
calendar; ignored otherwise. See Notes.
missing : any, optional
By default, i.e. if the value is None, this method will simply attempt
to convert the dates in the source calendar to the same dates in the
target calendar, and drop any of those that are not possible to
represent. If a value is provided, a new time coordinate will be
created in the target calendar with the same frequency as the original
time coordinate; for any dates that are not present in the source, the
data will be filled with this value. Note that using this mode requires
that the source data have an inferable frequency; for more information
see :py:func:`xarray.infer_freq`. For certain frequency, source, and
target calendar combinations, this could result in many missing values, see notes.
use_cftime : bool, optional
Whether to use cftime objects in the output, only used if `calendar` is
one of {"proleptic_gregorian", "gregorian" or "standard"}.
If True, the new time axis uses cftime objects.
If None (default), it uses :py:class:`numpy.datetime64` values if the date
range permits it, and :py:class:`cftime.datetime` objects if not.
If False, it uses :py:class:`numpy.datetime64` or fails.
Returns
-------
Copy of source with the time coordinate converted to the target calendar.
If `missing` was None (default), invalid dates in the new calendar are
dropped, but missing dates are not inserted.
If `missing` was given, the new data is reindexed to have a time axis
with the same frequency as the source, but in the new calendar; any
missing datapoints are filled with `missing`.
Notes
-----
Passing a value to `missing` is only usable if the source's time coordinate has an
inferable frequency (see :py:func:`~xarray.infer_freq`) and is only appropriate
if the target coordinate, generated from this frequency, has dates equivalent to the
source. It is usually **not** appropriate to use this mode with:
- Period-end frequencies: 'A', 'Y', 'Q' or 'M', as opposed to 'AS', 'YS', 'QS' and 'MS'
- Sub-monthly frequencies that do not divide a day evenly: 'W', 'nD' where `n != 1`,
or 'mH' where 24 % m != 0.
If one of the source or target calendars is `"360_day"`, `align_on` must
be specified and two options are offered.
"year"
The dates are translated according to their relative position in the year,
ignoring their original month and day information, meaning that the
missing/surplus days are added/removed at regular intervals.
From a `360_day` to a standard calendar, the output will be missing the
following dates (day of year in parentheses):
To a leap year:
January 31st (31), March 31st (91), June 1st (153), July 31st (213),
September 31st (275) and November 30th (335).
To a non-leap year:
February 6th (36), April 19th (109), July 2nd (183),
September 12th (255), November 25th (329).
From a standard calendar to a `"360_day"`, the following dates in the
source array will be dropped:
From a leap year:
January 31st (31), April 1st (92), June 1st (153), August 1st (214),
September 31st (275), December 1st (336)
From a non-leap year:
February 6th (37), April 20th (110), July 2nd (183),
September 13th (256), November 25th (329)
This option is best used on daily and subdaily data.
"date"
The month/day information is conserved and invalid dates are dropped
from the output. This means that when converting from a `"360_day"` to a
standard calendar, all 31sts (Jan, March, May, July, August, October and
December) will be missing as there is no equivalent dates in the
`"360_day"` calendar and the 29th (on non-leap years) and 30th of February
will be dropped as there are no equivalent dates in a standard calendar.
This option is best used with data on a frequency coarser than daily.
"random"
Similar to "year", each day of year of the source is mapped to another day of year
of the target. However, instead of always having the same missing days according
to the source and target years, here 5 days are chosen randomly, one for each fifth
of the year. However, February 29th is always missing when converting to a leap year,
or its value is dropped when converting from a leap year. This is similar to the method
used in the LOCA dataset (see Pierce, Cayan, and Thrasher (2014). doi:10.1175/JHM-D-14-0082.1).
This option is best used on daily data.
"""
| /usr/src/app/target_test_cases/failed_tests_convert_calendar.txt | def convert_calendar(
obj,
calendar,
dim="time",
align_on=None,
missing=None,
use_cftime=None,
):
"""Transform a time-indexed Dataset or DataArray to one that uses another calendar.
This function only converts the individual timestamps; it does not modify any
data except in dropping invalid/surplus dates, or inserting values for missing dates.
If the source and target calendars are both from a standard type, only the
type of the time array is modified. When converting from a calendar with a
leap year to a calendar without a leap year, the 29th of February will
be removed from the array. In the other direction the 29th of February will
be missing in the output, unless `missing` is specified, in which case that
value is inserted. For conversions involving the `360_day` calendar, see Notes.
This method is safe to use with sub-daily data as it doesn't touch the time
part of the timestamps.
Parameters
----------
obj : DataArray or Dataset
Input DataArray or Dataset with a time coordinate of a valid dtype
(:py:class:`numpy.datetime64` or :py:class:`cftime.datetime`).
calendar : str
The target calendar name.
dim : str
Name of the time coordinate in the input DataArray or Dataset.
align_on : {None, 'date', 'year', 'random'}
Must be specified when either the source or target is a `"360_day"`
calendar; ignored otherwise. See Notes.
missing : any, optional
By default, i.e. if the value is None, this method will simply attempt
to convert the dates in the source calendar to the same dates in the
target calendar, and drop any of those that are not possible to
represent. If a value is provided, a new time coordinate will be
created in the target calendar with the same frequency as the original
time coordinate; for any dates that are not present in the source, the
data will be filled with this value. Note that using this mode requires
that the source data have an inferable frequency; for more information
see :py:func:`xarray.infer_freq`. For certain frequency, source, and
target calendar combinations, this could result in many missing values, see notes.
use_cftime : bool, optional
Whether to use cftime objects in the output, only used if `calendar` is
one of {"proleptic_gregorian", "gregorian" or "standard"}.
If True, the new time axis uses cftime objects.
If None (default), it uses :py:class:`numpy.datetime64` values if the date
range permits it, and :py:class:`cftime.datetime` objects if not.
If False, it uses :py:class:`numpy.datetime64` or fails.
Returns
-------
Copy of source with the time coordinate converted to the target calendar.
If `missing` was None (default), invalid dates in the new calendar are
dropped, but missing dates are not inserted.
If `missing` was given, the new data is reindexed to have a time axis
with the same frequency as the source, but in the new calendar; any
missing datapoints are filled with `missing`.
Notes
-----
Passing a value to `missing` is only usable if the source's time coordinate has an
inferable frequency (see :py:func:`~xarray.infer_freq`) and is only appropriate
if the target coordinate, generated from this frequency, has dates equivalent to the
source. It is usually **not** appropriate to use this mode with:
- Period-end frequencies: 'A', 'Y', 'Q' or 'M', as opposed to 'AS', 'YS', 'QS' and 'MS'
- Sub-monthly frequencies that do not divide a day evenly: 'W', 'nD' where `n != 1`,
or 'mH' where 24 % m != 0.
If one of the source or target calendars is `"360_day"`, `align_on` must
be specified and two options are offered.
"year"
The dates are translated according to their relative position in the year,
ignoring their original month and day information, meaning that the
missing/surplus days are added/removed at regular intervals.
From a `360_day` to a standard calendar, the output will be missing the
following dates (day of year in parentheses):
To a leap year:
January 31st (31), March 31st (91), June 1st (153), July 31st (213),
September 31st (275) and November 30th (335).
To a non-leap year:
February 6th (36), April 19th (109), July 2nd (183),
September 12th (255), November 25th (329).
From a standard calendar to a `"360_day"`, the following dates in the
source array will be dropped:
From a leap year:
January 31st (31), April 1st (92), June 1st (153), August 1st (214),
September 31st (275), December 1st (336)
From a non-leap year:
February 6th (37), April 20th (110), July 2nd (183),
September 13th (256), November 25th (329)
This option is best used on daily and subdaily data.
"date"
The month/day information is conserved and invalid dates are dropped
from the output. This means that when converting from a `"360_day"` to a
standard calendar, all 31sts (Jan, March, May, July, August, October and
December) will be missing as there is no equivalent dates in the
`"360_day"` calendar and the 29th (on non-leap years) and 30th of February
will be dropped as there are no equivalent dates in a standard calendar.
This option is best used with data on a frequency coarser than daily.
"random"
Similar to "year", each day of year of the source is mapped to another day of year
of the target. However, instead of always having the same missing days according
to the source and target years, here 5 days are chosen randomly, one for each fifth
of the year. However, February 29th is always missing when converting to a leap year,
or its value is dropped when converting from a leap year. This is similar to the method
used in the LOCA dataset (see Pierce, Cayan, and Thrasher (2014). doi:10.1175/JHM-D-14-0082.1).
This option is best used on daily data.
"""
from xarray.core.dataarray import DataArray
time = obj[dim]
if not _contains_datetime_like_objects(time.variable):
raise ValueError(f"Coordinate {dim} must contain datetime objects.")
use_cftime = _should_cftime_be_used(time, calendar, use_cftime)
source_calendar = time.dt.calendar
# Do nothing if request calendar is the same as the source
# AND source is np XOR use_cftime
if source_calendar == calendar and is_np_datetime_like(time.dtype) ^ use_cftime:
return obj
if (time.dt.year == 0).any() and calendar in _CALENDARS_WITHOUT_YEAR_ZERO:
raise ValueError(
f"Source time coordinate contains dates with year 0, which is not supported by target calendar {calendar}."
)
if (source_calendar == "360_day" or calendar == "360_day") and align_on is None:
raise ValueError(
"Argument `align_on` must be specified with either 'date' or "
"'year' when converting to or from a '360_day' calendar."
)
if source_calendar != "360_day" and calendar != "360_day":
align_on = "date"
out = obj.copy()
if align_on in ["year", "random"]:
# Special case for conversion involving 360_day calendar
if align_on == "year":
# Instead of translating dates directly, this tries to keep the position within a year similar.
new_doy = _interpolate_day_of_year(time, target_calendar=calendar)
elif align_on == "random":
# The 5 days to remove are randomly chosen, one for each of the five 72-days periods of the year.
new_doy = time.groupby(f"{dim}.year").map(
_random_day_of_year, target_calendar=calendar, use_cftime=use_cftime
)
# Convert the source datetimes, but override the day of year with our new day of years.
out[dim] = DataArray(
[
_convert_to_new_calendar_with_new_day_of_year(
date, newdoy, calendar, use_cftime
)
for date, newdoy in zip(time.variable._data.array, new_doy, strict=True)
],
dims=(dim,),
name=dim,
)
# Remove duplicate timestamps, happens when reducing the number of days
out = out.isel({dim: np.unique(out[dim], return_index=True)[1]})
elif align_on == "date":
new_times = convert_times(
time.data,
get_date_type(calendar, use_cftime=use_cftime),
raise_on_invalid=False,
)
out[dim] = new_times
# Remove NaN that where put on invalid dates in target calendar
out = out.where(out[dim].notnull(), drop=True)
if use_cftime:
# Reassign times to ensure time index of output is a CFTimeIndex
# (previously it was an Index due to the presence of NaN values).
# Note this is not needed in the case that the output time index is
# a DatetimeIndex, since DatetimeIndexes can handle NaN values.
out[dim] = CFTimeIndex(out[dim].data)
if missing is not None:
time_target = date_range_like(time, calendar=calendar, use_cftime=use_cftime)
out = out.reindex({dim: time_target}, fill_value=missing)
# Copy attrs but remove `calendar` if still present.
out[dim].attrs.update(time.attrs)
out[dim].attrs.pop("calendar", None)
return out
| convert_calendar |
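A short sketch of the public entry point (`DataArray.convert_calendar` wraps the function above): converting daily data spanning a leap day to the "noleap" calendar drops February 29th.
import numpy as np
import pandas as pd
import xarray as xr

time = pd.date_range("2000-02-27", periods=5, freq="D")  # includes 2000-02-29
da = xr.DataArray(np.arange(5), coords={"time": time}, dims="time")

converted = da.convert_calendar("noleap")
print(da.sizes["time"], converted.sizes["time"])  # 5 4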
xarray | 29 | xarray/core/computation.py | def corr(
da_a: T_DataArray,
da_b: T_DataArray,
dim: Dims = None,
weights: T_DataArray | None = None,
) -> T_DataArray:
"""
Compute the Pearson correlation coefficient between
two DataArray objects along a shared dimension.
Parameters
----------
da_a : DataArray
Array to compute.
da_b : DataArray
Array to compute.
dim : str, iterable of hashable, "..." or None, optional
The dimension along which the correlation will be computed
weights : DataArray, optional
Array of weights.
Returns
-------
correlation: DataArray
See Also
--------
pandas.Series.corr : corresponding pandas function
xarray.cov : underlying covariance function
Examples
--------
>>> from xarray import DataArray
>>> da_a = DataArray(
... np.array([[1, 2, 3], [0.1, 0.2, 0.3], [3.2, 0.6, 1.8]]),
... dims=("space", "time"),
... coords=[
... ("space", ["IA", "IL", "IN"]),
... ("time", pd.date_range("2000-01-01", freq="1D", periods=3)),
... ],
... )
>>> da_a
<xarray.DataArray (space: 3, time: 3)> Size: 72B
array([[1. , 2. , 3. ],
[0.1, 0.2, 0.3],
[3.2, 0.6, 1.8]])
Coordinates:
* space (space) <U2 24B 'IA' 'IL' 'IN'
* time (time) datetime64[ns] 24B 2000-01-01 2000-01-02 2000-01-03
>>> da_b = DataArray(
... np.array([[0.2, 0.4, 0.6], [15, 10, 5], [3.2, 0.6, 1.8]]),
... dims=("space", "time"),
... coords=[
... ("space", ["IA", "IL", "IN"]),
... ("time", pd.date_range("2000-01-01", freq="1D", periods=3)),
... ],
... )
>>> da_b
<xarray.DataArray (space: 3, time: 3)> Size: 72B
array([[ 0.2, 0.4, 0.6],
[15. , 10. , 5. ],
[ 3.2, 0.6, 1.8]])
Coordinates:
* space (space) <U2 24B 'IA' 'IL' 'IN'
* time (time) datetime64[ns] 24B 2000-01-01 2000-01-02 2000-01-03
>>> xr.corr(da_a, da_b)
<xarray.DataArray ()> Size: 8B
array(-0.57087777)
>>> xr.corr(da_a, da_b, dim="time")
<xarray.DataArray (space: 3)> Size: 24B
array([ 1., -1., 1.])
Coordinates:
* space (space) <U2 24B 'IA' 'IL' 'IN'
>>> weights = DataArray(
... [4, 2, 1],
... dims=("space"),
... coords=[
... ("space", ["IA", "IL", "IN"]),
... ],
... )
>>> weights
<xarray.DataArray (space: 3)> Size: 24B
array([4, 2, 1])
Coordinates:
* space (space) <U2 24B 'IA' 'IL' 'IN'
>>> xr.corr(da_a, da_b, dim="space", weights=weights)
<xarray.DataArray (time: 3)> Size: 24B
array([-0.50240504, -0.83215028, -0.99057446])
Coordinates:
* time (time) datetime64[ns] 24B 2000-01-01 2000-01-02 2000-01-03
"""
| /usr/src/app/target_test_cases/failed_tests_corr.txt | def corr(
da_a: T_DataArray,
da_b: T_DataArray,
dim: Dims = None,
weights: T_DataArray | None = None,
) -> T_DataArray:
"""
Compute the Pearson correlation coefficient between
two DataArray objects along a shared dimension.
Parameters
----------
da_a : DataArray
Array to compute.
da_b : DataArray
Array to compute.
dim : str, iterable of hashable, "..." or None, optional
The dimension along which the correlation will be computed
weights : DataArray, optional
Array of weights.
Returns
-------
correlation: DataArray
See Also
--------
pandas.Series.corr : corresponding pandas function
xarray.cov : underlying covariance function
Examples
--------
>>> from xarray import DataArray
>>> da_a = DataArray(
... np.array([[1, 2, 3], [0.1, 0.2, 0.3], [3.2, 0.6, 1.8]]),
... dims=("space", "time"),
... coords=[
... ("space", ["IA", "IL", "IN"]),
... ("time", pd.date_range("2000-01-01", freq="1D", periods=3)),
... ],
... )
>>> da_a
<xarray.DataArray (space: 3, time: 3)> Size: 72B
array([[1. , 2. , 3. ],
[0.1, 0.2, 0.3],
[3.2, 0.6, 1.8]])
Coordinates:
* space (space) <U2 24B 'IA' 'IL' 'IN'
* time (time) datetime64[ns] 24B 2000-01-01 2000-01-02 2000-01-03
>>> da_b = DataArray(
... np.array([[0.2, 0.4, 0.6], [15, 10, 5], [3.2, 0.6, 1.8]]),
... dims=("space", "time"),
... coords=[
... ("space", ["IA", "IL", "IN"]),
... ("time", pd.date_range("2000-01-01", freq="1D", periods=3)),
... ],
... )
>>> da_b
<xarray.DataArray (space: 3, time: 3)> Size: 72B
array([[ 0.2, 0.4, 0.6],
[15. , 10. , 5. ],
[ 3.2, 0.6, 1.8]])
Coordinates:
* space (space) <U2 24B 'IA' 'IL' 'IN'
* time (time) datetime64[ns] 24B 2000-01-01 2000-01-02 2000-01-03
>>> xr.corr(da_a, da_b)
<xarray.DataArray ()> Size: 8B
array(-0.57087777)
>>> xr.corr(da_a, da_b, dim="time")
<xarray.DataArray (space: 3)> Size: 24B
array([ 1., -1., 1.])
Coordinates:
* space (space) <U2 24B 'IA' 'IL' 'IN'
>>> weights = DataArray(
... [4, 2, 1],
... dims=("space"),
... coords=[
... ("space", ["IA", "IL", "IN"]),
... ],
... )
>>> weights
<xarray.DataArray (space: 3)> Size: 24B
array([4, 2, 1])
Coordinates:
* space (space) <U2 24B 'IA' 'IL' 'IN'
>>> xr.corr(da_a, da_b, dim="space", weights=weights)
<xarray.DataArray (time: 3)> Size: 24B
array([-0.50240504, -0.83215028, -0.99057446])
Coordinates:
* time (time) datetime64[ns] 24B 2000-01-01 2000-01-02 2000-01-03
"""
from xarray.core.dataarray import DataArray
if any(not isinstance(arr, DataArray) for arr in [da_a, da_b]):
raise TypeError(
"Only xr.DataArray is supported."
f"Given {[type(arr) for arr in [da_a, da_b]]}."
)
if weights is not None:
if not isinstance(weights, DataArray):
raise TypeError(f"Only xr.DataArray is supported. Given {type(weights)}.")
return _cov_corr(da_a, da_b, weights=weights, dim=dim, method="corr")
| corr |
xarray | 30 | xarray/core/_aggregations.py | def count(
self,
dim: Dims = None,
*,
keep_attrs: bool | None = None,
**kwargs: Any,
) -> Self:
"""
Reduce this Dataset's data by applying ``count`` along some dimension(s).
Parameters
----------
dim : str, Iterable of Hashable, "..." or None, default: None
            Name of dimension[s] along which to apply ``count``, e.g. ``dim="x"``
or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions.
keep_attrs : bool or None, optional
If True, ``attrs`` will be copied from the original
object to the new one. If False, the new object will be
returned without attributes.
**kwargs : Any
Additional keyword arguments passed on to the appropriate array
function for calculating ``count`` on this object's data.
These could include dask-specific kwargs like ``split_every``.
Returns
-------
reduced : Dataset
New Dataset with ``count`` applied to its data and the
indicated dimension(s) removed
See Also
--------
pandas.DataFrame.count
dask.dataframe.DataFrame.count
DataArray.count
:ref:`agg`
User guide on reduction or aggregation operations.
Examples
--------
>>> da = xr.DataArray(
... np.array([1, 2, 3, 0, 2, np.nan]),
... dims="time",
... coords=dict(
... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)),
... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
... ),
... )
>>> ds = xr.Dataset(dict(da=da))
>>> ds
<xarray.Dataset> Size: 120B
Dimensions: (time: 6)
Coordinates:
* time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30
labels (time) <U1 24B 'a' 'b' 'c' 'c' 'b' 'a'
Data variables:
da (time) float64 48B 1.0 2.0 3.0 0.0 2.0 nan
>>> ds.count()
<xarray.Dataset> Size: 8B
Dimensions: ()
Data variables:
da int64 8B 5
"""
| /usr/src/app/target_test_cases/failed_tests_count.txt | def count(
self,
dim: Dims = None,
*,
keep_attrs: bool | None = None,
**kwargs: Any,
) -> Self:
"""
Reduce this Dataset's data by applying ``count`` along some dimension(s).
Parameters
----------
dim : str, Iterable of Hashable, "..." or None, default: None
            Name of dimension[s] along which to apply ``count``, e.g. ``dim="x"``
or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions.
keep_attrs : bool or None, optional
If True, ``attrs`` will be copied from the original
object to the new one. If False, the new object will be
returned without attributes.
**kwargs : Any
Additional keyword arguments passed on to the appropriate array
function for calculating ``count`` on this object's data.
These could include dask-specific kwargs like ``split_every``.
Returns
-------
reduced : Dataset
New Dataset with ``count`` applied to its data and the
indicated dimension(s) removed
See Also
--------
pandas.DataFrame.count
dask.dataframe.DataFrame.count
DataArray.count
:ref:`agg`
User guide on reduction or aggregation operations.
Examples
--------
>>> da = xr.DataArray(
... np.array([1, 2, 3, 0, 2, np.nan]),
... dims="time",
... coords=dict(
... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)),
... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
... ),
... )
>>> ds = xr.Dataset(dict(da=da))
>>> ds
<xarray.Dataset> Size: 120B
Dimensions: (time: 6)
Coordinates:
* time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30
labels (time) <U1 24B 'a' 'b' 'c' 'c' 'b' 'a'
Data variables:
da (time) float64 48B 1.0 2.0 3.0 0.0 2.0 nan
>>> ds.count()
<xarray.Dataset> Size: 8B
Dimensions: ()
Data variables:
da int64 8B 5
"""
return self.reduce(
duck_array_ops.count,
dim=dim,
numeric_only=False,
keep_attrs=keep_attrs,
**kwargs,
)
| count |
xarray | 31 | xarray/core/computation.py | def cov(
da_a: T_DataArray,
da_b: T_DataArray,
dim: Dims = None,
ddof: int = 1,
weights: T_DataArray | None = None,
) -> T_DataArray:
"""
Compute covariance between two DataArray objects along a shared dimension.
Parameters
----------
da_a : DataArray
Array to compute.
da_b : DataArray
Array to compute.
dim : str, iterable of hashable, "..." or None, optional
The dimension along which the covariance will be computed
ddof : int, default: 1
If ddof=1, covariance is normalized by N-1, giving an unbiased estimate,
else normalization is by N.
weights : DataArray, optional
Array of weights.
Returns
-------
covariance : DataArray
See Also
--------
pandas.Series.cov : corresponding pandas function
xarray.corr : respective function to calculate correlation
Examples
--------
>>> from xarray import DataArray
>>> da_a = DataArray(
... np.array([[1, 2, 3], [0.1, 0.2, 0.3], [3.2, 0.6, 1.8]]),
... dims=("space", "time"),
... coords=[
... ("space", ["IA", "IL", "IN"]),
... ("time", pd.date_range("2000-01-01", freq="1D", periods=3)),
... ],
... )
>>> da_a
<xarray.DataArray (space: 3, time: 3)> Size: 72B
array([[1. , 2. , 3. ],
[0.1, 0.2, 0.3],
[3.2, 0.6, 1.8]])
Coordinates:
* space (space) <U2 24B 'IA' 'IL' 'IN'
* time (time) datetime64[ns] 24B 2000-01-01 2000-01-02 2000-01-03
>>> da_b = DataArray(
... np.array([[0.2, 0.4, 0.6], [15, 10, 5], [3.2, 0.6, 1.8]]),
... dims=("space", "time"),
... coords=[
... ("space", ["IA", "IL", "IN"]),
... ("time", pd.date_range("2000-01-01", freq="1D", periods=3)),
... ],
... )
>>> da_b
<xarray.DataArray (space: 3, time: 3)> Size: 72B
array([[ 0.2, 0.4, 0.6],
[15. , 10. , 5. ],
[ 3.2, 0.6, 1.8]])
Coordinates:
* space (space) <U2 24B 'IA' 'IL' 'IN'
* time (time) datetime64[ns] 24B 2000-01-01 2000-01-02 2000-01-03
>>> xr.cov(da_a, da_b)
<xarray.DataArray ()> Size: 8B
array(-3.53055556)
>>> xr.cov(da_a, da_b, dim="time")
<xarray.DataArray (space: 3)> Size: 24B
array([ 0.2 , -0.5 , 1.69333333])
Coordinates:
* space (space) <U2 24B 'IA' 'IL' 'IN'
>>> weights = DataArray(
... [4, 2, 1],
... dims=("space"),
... coords=[
... ("space", ["IA", "IL", "IN"]),
... ],
... )
>>> weights
<xarray.DataArray (space: 3)> Size: 24B
array([4, 2, 1])
Coordinates:
* space (space) <U2 24B 'IA' 'IL' 'IN'
>>> xr.cov(da_a, da_b, dim="space", weights=weights)
<xarray.DataArray (time: 3)> Size: 24B
array([-4.69346939, -4.49632653, -3.37959184])
Coordinates:
* time (time) datetime64[ns] 24B 2000-01-01 2000-01-02 2000-01-03
"""
| /usr/src/app/target_test_cases/failed_tests_cov.txt | def cov(
da_a: T_DataArray,
da_b: T_DataArray,
dim: Dims = None,
ddof: int = 1,
weights: T_DataArray | None = None,
) -> T_DataArray:
"""
Compute covariance between two DataArray objects along a shared dimension.
Parameters
----------
da_a : DataArray
Array to compute.
da_b : DataArray
Array to compute.
dim : str, iterable of hashable, "..." or None, optional
The dimension along which the covariance will be computed
ddof : int, default: 1
If ddof=1, covariance is normalized by N-1, giving an unbiased estimate,
else normalization is by N.
weights : DataArray, optional
Array of weights.
Returns
-------
covariance : DataArray
See Also
--------
pandas.Series.cov : corresponding pandas function
xarray.corr : respective function to calculate correlation
Examples
--------
>>> from xarray import DataArray
>>> da_a = DataArray(
... np.array([[1, 2, 3], [0.1, 0.2, 0.3], [3.2, 0.6, 1.8]]),
... dims=("space", "time"),
... coords=[
... ("space", ["IA", "IL", "IN"]),
... ("time", pd.date_range("2000-01-01", freq="1D", periods=3)),
... ],
... )
>>> da_a
<xarray.DataArray (space: 3, time: 3)> Size: 72B
array([[1. , 2. , 3. ],
[0.1, 0.2, 0.3],
[3.2, 0.6, 1.8]])
Coordinates:
* space (space) <U2 24B 'IA' 'IL' 'IN'
* time (time) datetime64[ns] 24B 2000-01-01 2000-01-02 2000-01-03
>>> da_b = DataArray(
... np.array([[0.2, 0.4, 0.6], [15, 10, 5], [3.2, 0.6, 1.8]]),
... dims=("space", "time"),
... coords=[
... ("space", ["IA", "IL", "IN"]),
... ("time", pd.date_range("2000-01-01", freq="1D", periods=3)),
... ],
... )
>>> da_b
<xarray.DataArray (space: 3, time: 3)> Size: 72B
array([[ 0.2, 0.4, 0.6],
[15. , 10. , 5. ],
[ 3.2, 0.6, 1.8]])
Coordinates:
* space (space) <U2 24B 'IA' 'IL' 'IN'
* time (time) datetime64[ns] 24B 2000-01-01 2000-01-02 2000-01-03
>>> xr.cov(da_a, da_b)
<xarray.DataArray ()> Size: 8B
array(-3.53055556)
>>> xr.cov(da_a, da_b, dim="time")
<xarray.DataArray (space: 3)> Size: 24B
array([ 0.2 , -0.5 , 1.69333333])
Coordinates:
* space (space) <U2 24B 'IA' 'IL' 'IN'
>>> weights = DataArray(
... [4, 2, 1],
... dims=("space"),
... coords=[
... ("space", ["IA", "IL", "IN"]),
... ],
... )
>>> weights
<xarray.DataArray (space: 3)> Size: 24B
array([4, 2, 1])
Coordinates:
* space (space) <U2 24B 'IA' 'IL' 'IN'
>>> xr.cov(da_a, da_b, dim="space", weights=weights)
<xarray.DataArray (time: 3)> Size: 24B
array([-4.69346939, -4.49632653, -3.37959184])
Coordinates:
* time (time) datetime64[ns] 24B 2000-01-01 2000-01-02 2000-01-03
"""
from xarray.core.dataarray import DataArray
if any(not isinstance(arr, DataArray) for arr in [da_a, da_b]):
raise TypeError(
"Only xr.DataArray is supported."
f"Given {[type(arr) for arr in [da_a, da_b]]}."
)
if weights is not None:
if not isinstance(weights, DataArray):
raise TypeError(f"Only xr.DataArray is supported. Given {type(weights)}.")
return _cov_corr(da_a, da_b, weights=weights, dim=dim, ddof=ddof, method="cov")
| cov |
xarray | 32 | xarray/tests/conftest.py | def create_test_datatree():
"""
Create a test datatree with this structure:
<xarray.DataTree>
|-- set1
| |-- <xarray.Dataset>
| | Dimensions: ()
| | Data variables:
| | a int64 0
| | b int64 1
| |-- set1
| |-- set2
|-- set2
| |-- <xarray.Dataset>
| | Dimensions: (x: 2)
| | Data variables:
| | a (x) int64 2, 3
| | b (x) int64 0.1, 0.2
| |-- set1
|-- set3
|-- <xarray.Dataset>
| Dimensions: (x: 2, y: 3)
| Data variables:
| a (y) int64 6, 7, 8
| set0 (x) int64 9, 10
The structure has deliberately repeated names of tags, variables, and
dimensions in order to better check for bugs caused by name conflicts.
"""
| /usr/src/app/target_test_cases/failed_tests_create_test_datatree.txt | def create_test_datatree():
"""
Create a test datatree with this structure:
<xarray.DataTree>
|-- set1
| |-- <xarray.Dataset>
| | Dimensions: ()
| | Data variables:
| | a int64 0
| | b int64 1
| |-- set1
| |-- set2
|-- set2
| |-- <xarray.Dataset>
| | Dimensions: (x: 2)
| | Data variables:
| | a (x) int64 2, 3
| | b (x) int64 0.1, 0.2
| |-- set1
|-- set3
|-- <xarray.Dataset>
| Dimensions: (x: 2, y: 3)
| Data variables:
| a (y) int64 6, 7, 8
| set0 (x) int64 9, 10
The structure has deliberately repeated names of tags, variables, and
dimensions in order to better check for bugs caused by name conflicts.
"""
def _create_test_datatree(modify=lambda ds: ds):
set1_data = modify(xr.Dataset({"a": 0, "b": 1}))
set2_data = modify(xr.Dataset({"a": ("x", [2, 3]), "b": ("x", [0.1, 0.2])}))
root_data = modify(xr.Dataset({"a": ("y", [6, 7, 8]), "set0": ("x", [9, 10])}))
root = DataTree.from_dict(
{
"/": root_data,
"/set1": set1_data,
"/set1/set1": None,
"/set1/set2": None,
"/set2": set2_data,
"/set2/set1": None,
"/set3": None,
}
)
return root
return _create_test_datatree
| create_test_datatree |
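A brief sketch of how a test might consume the factory above, assuming the function is registered as a pytest fixture in ``conftest.py`` (the decorator is not shown in the row); the test name is hypothetical.
def test_datatree_structure(create_test_datatree):
    # The fixture returns a factory; calling it builds the tree described above.
    tree = create_test_datatree()
    assert set(tree.children) == {"set1", "set2", "set3"}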
xarray | 33 | xarray/core/computation.py | def cross(
a: DataArray | Variable, b: DataArray | Variable, *, dim: Hashable
) -> DataArray | Variable:
"""
Compute the cross product of two (arrays of) vectors.
The cross product of `a` and `b` in :math:`R^3` is a vector
perpendicular to both `a` and `b`. The vectors in `a` and `b` are
defined by the values along the dimension `dim` and can have sizes
1, 2 or 3. Where the size of either `a` or `b` is
1 or 2, the remaining components of the input vector is assumed to
be zero and the cross product calculated accordingly. In cases where
both input vectors have dimension 2, the z-component of the cross
product is returned.
Parameters
----------
a, b : DataArray or Variable
Components of the first and second vector(s).
dim : hashable
The dimension along which the cross product will be computed.
Must be available in both vectors.
Examples
--------
Vector cross-product with 3 dimensions:
>>> a = xr.DataArray([1, 2, 3])
>>> b = xr.DataArray([4, 5, 6])
>>> xr.cross(a, b, dim="dim_0")
<xarray.DataArray (dim_0: 3)> Size: 24B
array([-3, 6, -3])
Dimensions without coordinates: dim_0
Vector cross-product with 3 dimensions but zeros at the last axis
yields the same results as with 2 dimensions:
>>> a = xr.DataArray([1, 2, 0])
>>> b = xr.DataArray([4, 5, 0])
>>> xr.cross(a, b, dim="dim_0")
<xarray.DataArray (dim_0: 3)> Size: 24B
array([ 0, 0, -3])
Dimensions without coordinates: dim_0
Multiple vector cross-products. Note that the direction of the
cross product vector is defined by the right-hand rule:
>>> a = xr.DataArray(
... [[1, 2, 3], [4, 5, 6]],
... dims=("time", "cartesian"),
... coords=dict(
... time=(["time"], [0, 1]),
... cartesian=(["cartesian"], ["x", "y", "z"]),
... ),
... )
>>> b = xr.DataArray(
... [[4, 5, 6], [1, 2, 3]],
... dims=("time", "cartesian"),
... coords=dict(
... time=(["time"], [0, 1]),
... cartesian=(["cartesian"], ["x", "y", "z"]),
... ),
... )
>>> xr.cross(a, b, dim="cartesian")
<xarray.DataArray (time: 2, cartesian: 3)> Size: 48B
array([[-3, 6, -3],
[ 3, -6, 3]])
Coordinates:
* time (time) int64 16B 0 1
* cartesian (cartesian) <U1 12B 'x' 'y' 'z'
Cross can be called on Datasets by converting to DataArrays and later
back to a Dataset:
>>> ds_a = xr.Dataset(dict(x=("dim_0", [1]), y=("dim_0", [2]), z=("dim_0", [3])))
>>> ds_b = xr.Dataset(dict(x=("dim_0", [4]), y=("dim_0", [5]), z=("dim_0", [6])))
>>> c = xr.cross(
... ds_a.to_dataarray("cartesian"),
... ds_b.to_dataarray("cartesian"),
... dim="cartesian",
... )
>>> c.to_dataset(dim="cartesian")
<xarray.Dataset> Size: 24B
Dimensions: (dim_0: 1)
Dimensions without coordinates: dim_0
Data variables:
x (dim_0) int64 8B -3
y (dim_0) int64 8B 6
z (dim_0) int64 8B -3
See Also
--------
numpy.cross : Corresponding numpy function
"""
| /usr/src/app/target_test_cases/failed_tests_cross.txt | def cross(
a: DataArray | Variable, b: DataArray | Variable, *, dim: Hashable
) -> DataArray | Variable:
"""
Compute the cross product of two (arrays of) vectors.
The cross product of `a` and `b` in :math:`R^3` is a vector
perpendicular to both `a` and `b`. The vectors in `a` and `b` are
defined by the values along the dimension `dim` and can have sizes
1, 2 or 3. Where the size of either `a` or `b` is
1 or 2, the remaining components of the input vector is assumed to
be zero and the cross product calculated accordingly. In cases where
both input vectors have dimension 2, the z-component of the cross
product is returned.
Parameters
----------
a, b : DataArray or Variable
Components of the first and second vector(s).
dim : hashable
The dimension along which the cross product will be computed.
Must be available in both vectors.
Examples
--------
Vector cross-product with 3 dimensions:
>>> a = xr.DataArray([1, 2, 3])
>>> b = xr.DataArray([4, 5, 6])
>>> xr.cross(a, b, dim="dim_0")
<xarray.DataArray (dim_0: 3)> Size: 24B
array([-3, 6, -3])
Dimensions without coordinates: dim_0
Vector cross-product with 3 dimensions but zeros at the last axis
yields the same results as with 2 dimensions:
>>> a = xr.DataArray([1, 2, 0])
>>> b = xr.DataArray([4, 5, 0])
>>> xr.cross(a, b, dim="dim_0")
<xarray.DataArray (dim_0: 3)> Size: 24B
array([ 0, 0, -3])
Dimensions without coordinates: dim_0
Multiple vector cross-products. Note that the direction of the
cross product vector is defined by the right-hand rule:
>>> a = xr.DataArray(
... [[1, 2, 3], [4, 5, 6]],
... dims=("time", "cartesian"),
... coords=dict(
... time=(["time"], [0, 1]),
... cartesian=(["cartesian"], ["x", "y", "z"]),
... ),
... )
>>> b = xr.DataArray(
... [[4, 5, 6], [1, 2, 3]],
... dims=("time", "cartesian"),
... coords=dict(
... time=(["time"], [0, 1]),
... cartesian=(["cartesian"], ["x", "y", "z"]),
... ),
... )
>>> xr.cross(a, b, dim="cartesian")
<xarray.DataArray (time: 2, cartesian: 3)> Size: 48B
array([[-3, 6, -3],
[ 3, -6, 3]])
Coordinates:
* time (time) int64 16B 0 1
* cartesian (cartesian) <U1 12B 'x' 'y' 'z'
Cross can be called on Datasets by converting to DataArrays and later
back to a Dataset:
>>> ds_a = xr.Dataset(dict(x=("dim_0", [1]), y=("dim_0", [2]), z=("dim_0", [3])))
>>> ds_b = xr.Dataset(dict(x=("dim_0", [4]), y=("dim_0", [5]), z=("dim_0", [6])))
>>> c = xr.cross(
... ds_a.to_dataarray("cartesian"),
... ds_b.to_dataarray("cartesian"),
... dim="cartesian",
... )
>>> c.to_dataset(dim="cartesian")
<xarray.Dataset> Size: 24B
Dimensions: (dim_0: 1)
Dimensions without coordinates: dim_0
Data variables:
x (dim_0) int64 8B -3
y (dim_0) int64 8B 6
z (dim_0) int64 8B -3
See Also
--------
numpy.cross : Corresponding numpy function
"""
if dim not in a.dims:
raise ValueError(f"Dimension {dim!r} not on a")
elif dim not in b.dims:
raise ValueError(f"Dimension {dim!r} not on b")
if not 1 <= a.sizes[dim] <= 3:
raise ValueError(
f"The size of {dim!r} on a must be 1, 2, or 3 to be "
f"compatible with a cross product but is {a.sizes[dim]}"
)
elif not 1 <= b.sizes[dim] <= 3:
raise ValueError(
f"The size of {dim!r} on b must be 1, 2, or 3 to be "
f"compatible with a cross product but is {b.sizes[dim]}"
)
all_dims = list(dict.fromkeys(a.dims + b.dims))
if a.sizes[dim] != b.sizes[dim]:
# Arrays have different sizes. Append zeros where the smaller
# array is missing a value, zeros will not affect np.cross:
if (
not isinstance(a, Variable) # Only used to make mypy happy.
and dim in getattr(a, "coords", {})
and not isinstance(b, Variable) # Only used to make mypy happy.
and dim in getattr(b, "coords", {})
):
# If the arrays have coords we know which indexes to fill
# with zeros:
a, b = align(
a,
b,
fill_value=0,
join="outer",
exclude=set(all_dims) - {dim},
)
elif min(a.sizes[dim], b.sizes[dim]) == 2:
# If the array doesn't have coords we can only infer
# that it has composite values if the size is at least 2.
# Once padded, rechunk the padded array because apply_ufunc
# requires core dimensions not to be chunked:
if a.sizes[dim] < b.sizes[dim]:
a = a.pad({dim: (0, 1)}, constant_values=0)
# TODO: Should pad or apply_ufunc handle correct chunking?
a = a.chunk({dim: -1}) if is_chunked_array(a.data) else a
else:
b = b.pad({dim: (0, 1)}, constant_values=0)
# TODO: Should pad or apply_ufunc handle correct chunking?
b = b.chunk({dim: -1}) if is_chunked_array(b.data) else b
else:
raise ValueError(
f"{dim!r} on {'a' if a.sizes[dim] == 1 else 'b'} is incompatible:"
" dimensions without coordinates must have have a length of 2 or 3"
)
c = apply_ufunc(
np.cross,
a,
b,
input_core_dims=[[dim], [dim]],
output_core_dims=[[dim] if a.sizes[dim] == 3 else []],
dask="parallelized",
output_dtypes=[np.result_type(a, b)],
)
c = c.transpose(*all_dims, missing_dims="ignore")
return c
| cross |
xarray | 34 | xarray/coding/cftime_offsets.py | def date_range_like(source, calendar, use_cftime=None):
"""Generate a datetime array with the same frequency, start and end as
another one, but in a different calendar.
Parameters
----------
source : DataArray, CFTimeIndex, or pd.DatetimeIndex
1D datetime array
calendar : str
New calendar name.
use_cftime : bool, optional
If True, the output uses :py:class:`cftime.datetime` objects.
If None (default), :py:class:`numpy.datetime64` values are used if possible.
If False, :py:class:`numpy.datetime64` values are used or an error is raised.
Returns
-------
DataArray
1D datetime coordinate with the same start, end and frequency as the
source, but in the new calendar. The start date is assumed to exist in
the target calendar. If the end date doesn't exist, the code tries 1
and 2 calendar days before. There is a special case when the source time
series is daily or coarser and the end of the input range is on the
last day of the month. Then the output range will also end on the last
day of the month in the new calendar.
"""
| /usr/src/app/target_test_cases/failed_tests_date_range_like.txt | def date_range_like(source, calendar, use_cftime=None):
"""Generate a datetime array with the same frequency, start and end as
another one, but in a different calendar.
Parameters
----------
source : DataArray, CFTimeIndex, or pd.DatetimeIndex
1D datetime array
calendar : str
New calendar name.
use_cftime : bool, optional
If True, the output uses :py:class:`cftime.datetime` objects.
If None (default), :py:class:`numpy.datetime64` values are used if possible.
If False, :py:class:`numpy.datetime64` values are used or an error is raised.
Returns
-------
DataArray
1D datetime coordinate with the same start, end and frequency as the
source, but in the new calendar. The start date is assumed to exist in
the target calendar. If the end date doesn't exist, the code tries 1
and 2 calendar days before. There is a special case when the source time
series is daily or coarser and the end of the input range is on the
last day of the month. Then the output range will also end on the last
day of the month in the new calendar.
"""
from xarray.coding.frequencies import infer_freq
from xarray.core.dataarray import DataArray
if not isinstance(source, pd.DatetimeIndex | CFTimeIndex) and (
isinstance(source, DataArray)
and (source.ndim != 1)
or not _contains_datetime_like_objects(source.variable)
):
raise ValueError(
"'source' must be a 1D array of datetime objects for inferring its range."
)
freq = infer_freq(source)
if freq is None:
raise ValueError(
"`date_range_like` was unable to generate a range as the source frequency was not inferable."
)
# TODO remove once requiring pandas >= 2.2
freq = _legacy_to_new_freq(freq)
use_cftime = _should_cftime_be_used(source, calendar, use_cftime)
source_start = source.values.min()
source_end = source.values.max()
freq_as_offset = to_offset(freq)
if freq_as_offset.n < 0:
source_start, source_end = source_end, source_start
if is_np_datetime_like(source.dtype):
# We want to use datetime fields (datetime64 object don't have them)
source_calendar = "standard"
# TODO: the strict enforcement of nanosecond precision Timestamps can be
# relaxed when addressing GitHub issue #7493.
source_start = nanosecond_precision_timestamp(source_start)
source_end = nanosecond_precision_timestamp(source_end)
else:
if isinstance(source, CFTimeIndex):
source_calendar = source.calendar
else: # DataArray
source_calendar = source.dt.calendar
if calendar == source_calendar and is_np_datetime_like(source.dtype) ^ use_cftime:
return source
date_type = get_date_type(calendar, use_cftime)
start = convert_time_or_go_back(source_start, date_type)
end = convert_time_or_go_back(source_end, date_type)
# For the cases where the source ends on the end of the month, we expect the same in the new calendar.
if source_end.day == source_end.daysinmonth and isinstance(
freq_as_offset, YearEnd | QuarterEnd | MonthEnd | Day
):
end = end.replace(day=end.daysinmonth)
return date_range(
start=start.isoformat(),
end=end.isoformat(),
freq=freq,
calendar=calendar,
)
| date_range_like |
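A short sketch of the intended use, assuming the public ``xr.date_range_like`` wrapper exposes the signature shown above:
import pandas as pd
import xarray as xr
# A daily axis in the standard (proleptic Gregorian) calendar.
times = pd.date_range("2000-01-01", periods=5, freq="D")
# Same start, end and frequency, but as cftime dates in the noleap calendar.
noleap_times = xr.date_range_like(times, calendar="noleap")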
xarray | 35 | xarray/conventions.py | def decode_cf(
obj: T_DatasetOrAbstractstore,
concat_characters: bool = True,
mask_and_scale: bool = True,
decode_times: bool = True,
decode_coords: bool | Literal["coordinates", "all"] = True,
drop_variables: T_DropVariables = None,
use_cftime: bool | None = None,
decode_timedelta: bool | None = None,
) -> Dataset:
"""Decode the given Dataset or Datastore according to CF conventions into
a new Dataset.
Parameters
----------
obj : Dataset or DataStore
Object to decode.
concat_characters : bool, optional
Should character arrays be concatenated to strings, for
example: ["h", "e", "l", "l", "o"] -> "hello"
mask_and_scale : bool, optional
Lazily scale (using scale_factor and add_offset) and mask
(using _FillValue).
decode_times : bool, optional
Decode cf times (e.g., integers since "hours since 2000-01-01") to
np.datetime64.
decode_coords : bool or {"coordinates", "all"}, optional
Controls which variables are set as coordinate variables:
- "coordinates" or True: Set variables referred to in the
``'coordinates'`` attribute of the datasets or individual variables
as coordinate variables.
- "all": Set variables referred to in ``'grid_mapping'``, ``'bounds'`` and
other attributes as coordinate variables.
drop_variables : str or iterable, optional
A variable or list of variables to exclude from being parsed from the
dataset. This may be useful to drop variables with problems or
inconsistent values.
use_cftime : bool, optional
Only relevant if encoded dates come from a standard calendar
(e.g. "gregorian", "proleptic_gregorian", "standard", or not
specified). If None (default), attempt to decode times to
``np.datetime64[ns]`` objects; if this is not possible, decode times to
``cftime.datetime`` objects. If True, always decode times to
``cftime.datetime`` objects, regardless of whether or not they can be
represented using ``np.datetime64[ns]`` objects. If False, always
decode times to ``np.datetime64[ns]`` objects; if this is not possible
raise an error.
decode_timedelta : bool, optional
If True, decode variables and coordinates with time units in
{"days", "hours", "minutes", "seconds", "milliseconds", "microseconds"}
into timedelta objects. If False, leave them encoded as numbers.
If None (default), assume the same value of decode_time.
Returns
-------
decoded : Dataset
"""
| /usr/src/app/target_test_cases/failed_tests_decode_cf.txt | def decode_cf(
obj: T_DatasetOrAbstractstore,
concat_characters: bool = True,
mask_and_scale: bool = True,
decode_times: bool = True,
decode_coords: bool | Literal["coordinates", "all"] = True,
drop_variables: T_DropVariables = None,
use_cftime: bool | None = None,
decode_timedelta: bool | None = None,
) -> Dataset:
"""Decode the given Dataset or Datastore according to CF conventions into
a new Dataset.
Parameters
----------
obj : Dataset or DataStore
Object to decode.
concat_characters : bool, optional
Should character arrays be concatenated to strings, for
example: ["h", "e", "l", "l", "o"] -> "hello"
mask_and_scale : bool, optional
Lazily scale (using scale_factor and add_offset) and mask
(using _FillValue).
decode_times : bool, optional
Decode cf times (e.g., integers since "hours since 2000-01-01") to
np.datetime64.
decode_coords : bool or {"coordinates", "all"}, optional
Controls which variables are set as coordinate variables:
- "coordinates" or True: Set variables referred to in the
``'coordinates'`` attribute of the datasets or individual variables
as coordinate variables.
- "all": Set variables referred to in ``'grid_mapping'``, ``'bounds'`` and
other attributes as coordinate variables.
drop_variables : str or iterable, optional
A variable or list of variables to exclude from being parsed from the
dataset. This may be useful to drop variables with problems or
inconsistent values.
use_cftime : bool, optional
Only relevant if encoded dates come from a standard calendar
(e.g. "gregorian", "proleptic_gregorian", "standard", or not
specified). If None (default), attempt to decode times to
``np.datetime64[ns]`` objects; if this is not possible, decode times to
``cftime.datetime`` objects. If True, always decode times to
``cftime.datetime`` objects, regardless of whether or not they can be
represented using ``np.datetime64[ns]`` objects. If False, always
decode times to ``np.datetime64[ns]`` objects; if this is not possible
raise an error.
decode_timedelta : bool, optional
If True, decode variables and coordinates with time units in
{"days", "hours", "minutes", "seconds", "milliseconds", "microseconds"}
into timedelta objects. If False, leave them encoded as numbers.
If None (default), assume the same value of decode_time.
Returns
-------
decoded : Dataset
"""
from xarray.backends.common import AbstractDataStore
from xarray.core.dataset import Dataset
vars: T_Variables
attrs: T_Attrs
if isinstance(obj, Dataset):
vars = obj._variables
attrs = obj.attrs
extra_coords = set(obj.coords)
close = obj._close
encoding = obj.encoding
elif isinstance(obj, AbstractDataStore):
vars, attrs = obj.load()
extra_coords = set()
close = obj.close
encoding = obj.get_encoding()
else:
raise TypeError("can only decode Dataset or DataStore objects")
vars, attrs, coord_names = decode_cf_variables(
vars,
attrs,
concat_characters,
mask_and_scale,
decode_times,
decode_coords,
drop_variables=drop_variables,
use_cftime=use_cftime,
decode_timedelta=decode_timedelta,
)
ds = Dataset(vars, attrs=attrs)
ds = ds.set_coords(coord_names.union(extra_coords).intersection(vars))
ds.set_close(close)
ds.encoding = encoding
return ds
| decode_cf |
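A minimal sketch of the typical round trip: open a file with decoding disabled, then decode explicitly. The file name is hypothetical.
import xarray as xr
raw = xr.open_dataset("example.nc", decode_cf=False)  # hypothetical file
ds = xr.decode_cf(raw, mask_and_scale=True, decode_times=True)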
xarray | 36 | xarray/coding/times.py | def decode_cf_datetime(
num_dates, units: str, calendar: str | None = None, use_cftime: bool | None = None
) -> np.ndarray:
"""Given an array of numeric dates in netCDF format, convert it into a
numpy array of date time objects.
For standard (Gregorian) calendars, this function uses vectorized
operations, which makes it much faster than cftime.num2date. In such a
case, the returned array will be of type np.datetime64.
Note that time unit in `units` must not be smaller than microseconds and
not larger than days.
See Also
--------
cftime.num2date
"""
| /usr/src/app/target_test_cases/failed_tests_decode_cf_datetime.txt | def decode_cf_datetime(
num_dates, units: str, calendar: str | None = None, use_cftime: bool | None = None
) -> np.ndarray:
"""Given an array of numeric dates in netCDF format, convert it into a
numpy array of date time objects.
For standard (Gregorian) calendars, this function uses vectorized
operations, which makes it much faster than cftime.num2date. In such a
case, the returned array will be of type np.datetime64.
Note that time unit in `units` must not be smaller than microseconds and
not larger than days.
See Also
--------
cftime.num2date
"""
num_dates = np.asarray(num_dates)
flat_num_dates = ravel(num_dates)
if calendar is None:
calendar = "standard"
if use_cftime is None:
try:
dates = _decode_datetime_with_pandas(flat_num_dates, units, calendar)
except (KeyError, OutOfBoundsDatetime, OutOfBoundsTimedelta, OverflowError):
dates = _decode_datetime_with_cftime(
flat_num_dates.astype(float), units, calendar
)
if (
dates[np.nanargmin(num_dates)].year < 1678
or dates[np.nanargmax(num_dates)].year >= 2262
):
if _is_standard_calendar(calendar):
warnings.warn(
"Unable to decode time axis into full "
"numpy.datetime64 objects, continuing using "
"cftime.datetime objects instead, reason: dates out "
"of range",
SerializationWarning,
stacklevel=3,
)
else:
if _is_standard_calendar(calendar):
dates = cftime_to_nptime(dates)
elif use_cftime:
dates = _decode_datetime_with_cftime(flat_num_dates, units, calendar)
else:
dates = _decode_datetime_with_pandas(flat_num_dates, units, calendar)
return reshape(dates, num_dates.shape)
| decode_cf_datetime |
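A minimal sketch of decoding numeric times directly with this helper, imported from the module path given in the row:
import numpy as np
from xarray.coding.times import decode_cf_datetime
nums = np.array([0, 12, 24])
dates = decode_cf_datetime(nums, units="hours since 2000-01-01", calendar="standard")
# dates holds np.datetime64 values for 2000-01-01T00:00, 2000-01-01T12:00
# and 2000-01-02T00:00.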
xarray | 37 | xarray/conventions.py | def decode_cf_variable(
name: Hashable,
var: Variable,
concat_characters: bool = True,
mask_and_scale: bool = True,
decode_times: bool = True,
decode_endianness: bool = True,
stack_char_dim: bool = True,
use_cftime: bool | None = None,
decode_timedelta: bool | None = None,
) -> Variable:
"""
Decodes a variable which may hold CF encoded information.
This includes variables that have been masked and scaled, which
hold CF style time variables (this is almost always the case if
the dataset has been serialized) and which have strings encoded
as character arrays.
Parameters
----------
name : str
Name of the variable. Used for better error messages.
var : Variable
A variable holding potentially CF encoded information.
concat_characters : bool
Should character arrays be concatenated to strings, for
example: ["h", "e", "l", "l", "o"] -> "hello"
mask_and_scale : bool
Lazily scale (using scale_factor and add_offset) and mask
(using _FillValue). If the _Unsigned attribute is present
treat integer arrays as unsigned.
decode_times : bool
Decode cf times ("hours since 2000-01-01") to np.datetime64.
decode_endianness : bool
Decode arrays from non-native to native endianness.
stack_char_dim : bool
Whether to stack characters into bytes along the last dimension of this
array. Passed as an argument because we need to look at the full
dataset to figure out if this is appropriate.
use_cftime : bool, optional
Only relevant if encoded dates come from a standard calendar
(e.g. "gregorian", "proleptic_gregorian", "standard", or not
specified). If None (default), attempt to decode times to
``np.datetime64[ns]`` objects; if this is not possible, decode times to
``cftime.datetime`` objects. If True, always decode times to
``cftime.datetime`` objects, regardless of whether or not they can be
represented using ``np.datetime64[ns]`` objects. If False, always
decode times to ``np.datetime64[ns]`` objects; if this is not possible
raise an error.
Returns
-------
out : Variable
A variable holding the decoded equivalent of var.
"""
| /usr/src/app/target_test_cases/failed_tests_decode_cf_variable.txt | def decode_cf_variable(
name: Hashable,
var: Variable,
concat_characters: bool = True,
mask_and_scale: bool = True,
decode_times: bool = True,
decode_endianness: bool = True,
stack_char_dim: bool = True,
use_cftime: bool | None = None,
decode_timedelta: bool | None = None,
) -> Variable:
"""
Decodes a variable which may hold CF encoded information.
This includes variables that have been masked and scaled, which
hold CF style time variables (this is almost always the case if
the dataset has been serialized) and which have strings encoded
as character arrays.
Parameters
----------
name : str
Name of the variable. Used for better error messages.
var : Variable
A variable holding potentially CF encoded information.
concat_characters : bool
Should character arrays be concatenated to strings, for
example: ["h", "e", "l", "l", "o"] -> "hello"
mask_and_scale : bool
Lazily scale (using scale_factor and add_offset) and mask
(using _FillValue). If the _Unsigned attribute is present
treat integer arrays as unsigned.
decode_times : bool
Decode cf times ("hours since 2000-01-01") to np.datetime64.
decode_endianness : bool
Decode arrays from non-native to native endianness.
stack_char_dim : bool
Whether to stack characters into bytes along the last dimension of this
array. Passed as an argument because we need to look at the full
dataset to figure out if this is appropriate.
use_cftime : bool, optional
Only relevant if encoded dates come from a standard calendar
(e.g. "gregorian", "proleptic_gregorian", "standard", or not
specified). If None (default), attempt to decode times to
``np.datetime64[ns]`` objects; if this is not possible, decode times to
``cftime.datetime`` objects. If True, always decode times to
``cftime.datetime`` objects, regardless of whether or not they can be
represented using ``np.datetime64[ns]`` objects. If False, always
decode times to ``np.datetime64[ns]`` objects; if this is not possible
raise an error.
Returns
-------
out : Variable
A variable holding the decoded equivalent of var.
"""
# Ensure datetime-like Variables are passed through unmodified (GH 6453)
if _contains_datetime_like_objects(var):
return var
original_dtype = var.dtype
if decode_timedelta is None:
decode_timedelta = decode_times
if concat_characters:
if stack_char_dim:
var = strings.CharacterArrayCoder().decode(var, name=name)
var = strings.EncodedStringCoder().decode(var)
if original_dtype.kind == "O":
var = variables.ObjectVLenStringCoder().decode(var)
original_dtype = var.dtype
if mask_and_scale:
for coder in [
variables.CFMaskCoder(),
variables.CFScaleOffsetCoder(),
]:
var = coder.decode(var, name=name)
if decode_timedelta:
var = times.CFTimedeltaCoder().decode(var, name=name)
if decode_times:
var = times.CFDatetimeCoder(use_cftime=use_cftime).decode(var, name=name)
if decode_endianness and not var.dtype.isnative:
var = variables.EndianCoder().decode(var)
original_dtype = var.dtype
var = variables.BooleanCoder().decode(var)
dimensions, data, attributes, encoding = variables.unpack_for_decoding(var)
encoding.setdefault("dtype", original_dtype)
if not is_duck_dask_array(data):
data = indexing.LazilyIndexedArray(data)
return Variable(dimensions, data, attributes, encoding=encoding, fastpath=True)
| decode_cf_variable |
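A small sketch of decoding a single masked-and-scaled variable; the variable name and attribute values are illustrative.
import numpy as np
import xarray as xr
from xarray.conventions import decode_cf_variable
raw = xr.Variable(
    ("x",),
    np.array([1, 2, -999], dtype="int16"),
    attrs={"_FillValue": -999, "scale_factor": 0.1},
)
decoded = decode_cf_variable("temperature", raw)
# The fill value becomes NaN and the remaining values are scaled to float.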
xarray | 38 | xarray/core/computation.py | def dot(
*arrays,
dim: Dims = None,
**kwargs: Any,
):
"""Generalized dot product for xarray objects. Like ``np.einsum``, but
provides a simpler interface based on array dimension names.
Parameters
----------
*arrays : DataArray or Variable
Arrays to compute.
dim : str, iterable of hashable, "..." or None, optional
Which dimensions to sum over. Ellipsis ('...') sums over all dimensions.
If not specified, then all the common dimensions are summed over.
**kwargs : dict
Additional keyword arguments passed to ``numpy.einsum`` or
``dask.array.einsum``
Returns
-------
DataArray
See Also
--------
numpy.einsum
dask.array.einsum
opt_einsum.contract
Notes
-----
We recommend installing the optional ``opt_einsum`` package, or alternatively passing ``optimize=True``,
which is passed through to ``np.einsum``, and works for most array backends.
Examples
--------
>>> da_a = xr.DataArray(np.arange(3 * 2).reshape(3, 2), dims=["a", "b"])
>>> da_b = xr.DataArray(np.arange(3 * 2 * 2).reshape(3, 2, 2), dims=["a", "b", "c"])
>>> da_c = xr.DataArray(np.arange(2 * 3).reshape(2, 3), dims=["c", "d"])
>>> da_a
<xarray.DataArray (a: 3, b: 2)> Size: 48B
array([[0, 1],
[2, 3],
[4, 5]])
Dimensions without coordinates: a, b
>>> da_b
<xarray.DataArray (a: 3, b: 2, c: 2)> Size: 96B
array([[[ 0, 1],
[ 2, 3]],
<BLANKLINE>
[[ 4, 5],
[ 6, 7]],
<BLANKLINE>
[[ 8, 9],
[10, 11]]])
Dimensions without coordinates: a, b, c
>>> da_c
<xarray.DataArray (c: 2, d: 3)> Size: 48B
array([[0, 1, 2],
[3, 4, 5]])
Dimensions without coordinates: c, d
>>> xr.dot(da_a, da_b, dim=["a", "b"])
<xarray.DataArray (c: 2)> Size: 16B
array([110, 125])
Dimensions without coordinates: c
>>> xr.dot(da_a, da_b, dim=["a"])
<xarray.DataArray (b: 2, c: 2)> Size: 32B
array([[40, 46],
[70, 79]])
Dimensions without coordinates: b, c
>>> xr.dot(da_a, da_b, da_c, dim=["b", "c"])
<xarray.DataArray (a: 3, d: 3)> Size: 72B
array([[ 9, 14, 19],
[ 93, 150, 207],
[273, 446, 619]])
Dimensions without coordinates: a, d
>>> xr.dot(da_a, da_b)
<xarray.DataArray (c: 2)> Size: 16B
array([110, 125])
Dimensions without coordinates: c
>>> xr.dot(da_a, da_b, dim=...)
<xarray.DataArray ()> Size: 8B
array(235)
"""
| /usr/src/app/target_test_cases/failed_tests_dot.txt | def dot(
*arrays,
dim: Dims = None,
**kwargs: Any,
):
"""Generalized dot product for xarray objects. Like ``np.einsum``, but
provides a simpler interface based on array dimension names.
Parameters
----------
*arrays : DataArray or Variable
Arrays to compute.
dim : str, iterable of hashable, "..." or None, optional
Which dimensions to sum over. Ellipsis ('...') sums over all dimensions.
If not specified, then all the common dimensions are summed over.
**kwargs : dict
Additional keyword arguments passed to ``numpy.einsum`` or
``dask.array.einsum``
Returns
-------
DataArray
See Also
--------
numpy.einsum
dask.array.einsum
opt_einsum.contract
Notes
-----
We recommend installing the optional ``opt_einsum`` package, or alternatively passing ``optimize=True``,
which is passed through to ``np.einsum``, and works for most array backends.
Examples
--------
>>> da_a = xr.DataArray(np.arange(3 * 2).reshape(3, 2), dims=["a", "b"])
>>> da_b = xr.DataArray(np.arange(3 * 2 * 2).reshape(3, 2, 2), dims=["a", "b", "c"])
>>> da_c = xr.DataArray(np.arange(2 * 3).reshape(2, 3), dims=["c", "d"])
>>> da_a
<xarray.DataArray (a: 3, b: 2)> Size: 48B
array([[0, 1],
[2, 3],
[4, 5]])
Dimensions without coordinates: a, b
>>> da_b
<xarray.DataArray (a: 3, b: 2, c: 2)> Size: 96B
array([[[ 0, 1],
[ 2, 3]],
<BLANKLINE>
[[ 4, 5],
[ 6, 7]],
<BLANKLINE>
[[ 8, 9],
[10, 11]]])
Dimensions without coordinates: a, b, c
>>> da_c
<xarray.DataArray (c: 2, d: 3)> Size: 48B
array([[0, 1, 2],
[3, 4, 5]])
Dimensions without coordinates: c, d
>>> xr.dot(da_a, da_b, dim=["a", "b"])
<xarray.DataArray (c: 2)> Size: 16B
array([110, 125])
Dimensions without coordinates: c
>>> xr.dot(da_a, da_b, dim=["a"])
<xarray.DataArray (b: 2, c: 2)> Size: 32B
array([[40, 46],
[70, 79]])
Dimensions without coordinates: b, c
>>> xr.dot(da_a, da_b, da_c, dim=["b", "c"])
<xarray.DataArray (a: 3, d: 3)> Size: 72B
array([[ 9, 14, 19],
[ 93, 150, 207],
[273, 446, 619]])
Dimensions without coordinates: a, d
>>> xr.dot(da_a, da_b)
<xarray.DataArray (c: 2)> Size: 16B
array([110, 125])
Dimensions without coordinates: c
>>> xr.dot(da_a, da_b, dim=...)
<xarray.DataArray ()> Size: 8B
array(235)
"""
from xarray.core.dataarray import DataArray
from xarray.core.variable import Variable
if any(not isinstance(arr, Variable | DataArray) for arr in arrays):
raise TypeError(
"Only xr.DataArray and xr.Variable are supported."
f"Given {[type(arr) for arr in arrays]}."
)
if len(arrays) == 0:
raise TypeError("At least one array should be given.")
common_dims: set[Hashable] = set.intersection(*(set(arr.dims) for arr in arrays))
all_dims = []
for arr in arrays:
all_dims += [d for d in arr.dims if d not in all_dims]
einsum_axes = "abcdefghijklmnopqrstuvwxyz"
dim_map = {d: einsum_axes[i] for i, d in enumerate(all_dims)}
if dim is None:
# find dimensions that occur more than once
dim_counts: Counter = Counter()
for arr in arrays:
dim_counts.update(arr.dims)
dim = tuple(d for d, c in dim_counts.items() if c > 1)
else:
dim = parse_dims(dim, all_dims=tuple(all_dims))
dot_dims: set[Hashable] = set(dim)
# dimensions to be parallelized
broadcast_dims = common_dims - dot_dims
input_core_dims = [
[d for d in arr.dims if d not in broadcast_dims] for arr in arrays
]
output_core_dims = [
[d for d in all_dims if d not in dot_dims and d not in broadcast_dims]
]
# construct einsum subscripts, such as '...abc,...ab->...c'
# Note: input_core_dims are always moved to the last position
subscripts_list = [
"..." + "".join(dim_map[d] for d in ds) for ds in input_core_dims
]
subscripts = ",".join(subscripts_list)
subscripts += "->..." + "".join(dim_map[d] for d in output_core_dims[0])
join = OPTIONS["arithmetic_join"]
# using "inner" emulates `(a * b).sum()` for all joins (except "exact")
if join != "exact":
join = "inner"
# subscripts should be passed to np.einsum as arg, not as kwargs. We need
# to construct a partial function for apply_ufunc to work.
func = functools.partial(duck_array_ops.einsum, subscripts, **kwargs)
result = apply_ufunc(
func,
*arrays,
input_core_dims=input_core_dims,
output_core_dims=output_core_dims,
join=join,
dask="allowed",
)
return result.transpose(*all_dims, missing_dims="ignore")
| dot |
xarray | 39 | xarray/conventions.py | def encode_cf_variable(
var: Variable, needs_copy: bool = True, name: T_Name = None
) -> Variable:
"""
Converts a Variable into a Variable which follows some
of the CF conventions:
- Nans are masked using _FillValue (or the deprecated missing_value)
- Rescaling via: scale_factor and add_offset
- datetimes are converted to the CF 'units since time' format
- dtype encodings are enforced.
Parameters
----------
var : Variable
A variable holding un-encoded data.
Returns
-------
out : Variable
A variable which has been encoded as described above.
"""
| /usr/src/app/target_test_cases/failed_tests_encode_cf_variable.txt | def encode_cf_variable(
var: Variable, needs_copy: bool = True, name: T_Name = None
) -> Variable:
"""
Converts a Variable into a Variable which follows some
of the CF conventions:
- Nans are masked using _FillValue (or the deprecated missing_value)
- Rescaling via: scale_factor and add_offset
- datetimes are converted to the CF 'units since time' format
- dtype encodings are enforced.
Parameters
----------
var : Variable
A variable holding un-encoded data.
Returns
-------
out : Variable
A variable which has been encoded as described above.
"""
ensure_not_multiindex(var, name=name)
for coder in [
times.CFDatetimeCoder(),
times.CFTimedeltaCoder(),
variables.CFScaleOffsetCoder(),
variables.CFMaskCoder(),
variables.NativeEnumCoder(),
variables.NonStringCoder(),
variables.DefaultFillvalueCoder(),
variables.BooleanCoder(),
]:
var = coder.encode(var, name=name)
# TODO(kmuehlbauer): check if ensure_dtype_not_object can be moved to backends:
var = ensure_dtype_not_object(var, name=name)
for attr_name in CF_RELATED_DATA:
pop_to(var.encoding, var.attrs, attr_name)
return var
| encode_cf_variable |
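A short sketch of the encoding direction using a datetime variable; the exact unit string depends on the data, so the comment only describes the general outcome.
import numpy as np
from xarray import Variable
from xarray.conventions import encode_cf_variable
var = Variable(
    ("time",),
    np.array(["2000-01-01", "2000-01-02"], dtype="datetime64[ns]"),
)
encoded = encode_cf_variable(var, name="time")
# The datetimes become numbers and encoded.attrs gains "units"/"calendar" entries.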
xarray | 40 | xarray/conventions.py | def encode_dataset_coordinates(dataset: Dataset):
"""Encode coordinates on the given dataset object into variable specific
and global attributes.
When possible, this is done according to CF conventions.
Parameters
----------
dataset : Dataset
Object to encode.
Returns
-------
variables : dict
attrs : dict
"""
| /usr/src/app/target_test_cases/failed_tests_encode_dataset_coordinates.txt | def encode_dataset_coordinates(dataset: Dataset):
"""Encode coordinates on the given dataset object into variable specific
and global attributes.
When possible, this is done according to CF conventions.
Parameters
----------
dataset : Dataset
Object to encode.
Returns
-------
variables : dict
attrs : dict
"""
non_dim_coord_names = set(dataset.coords) - set(dataset.dims)
return _encode_coordinates(
dataset._variables, dataset.attrs, non_dim_coord_names=non_dim_coord_names
)
| encode_dataset_coordinates |
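A minimal sketch, assuming a non-dimension coordinate that shares a dimension with a data variable:
import xarray as xr
from xarray.conventions import encode_dataset_coordinates
ds = xr.Dataset(
    {"t": ("x", [1.0, 2.0])},
    coords={"lon": ("x", [10.0, 20.0])},  # non-dimension coordinate
)
variables, attrs = encode_dataset_coordinates(ds)
# "lon" should now be referenced through a CF-style "coordinates" attribute on "t".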
xarray | 41 | xarray/namedarray/_array_api.py | def expand_dims(
x: NamedArray[Any, _DType],
/,
*,
dim: _Dim | Default = _default,
axis: _Axis = 0,
) -> NamedArray[Any, _DType]:
"""
Expands the shape of an array by inserting a new dimension of size one at the
position specified by dims.
Parameters
----------
x :
Array to expand.
dim :
Dimension name. New dimension will be stored in the axis position.
axis :
(Not recommended) Axis position (zero-based). Default is 0.
Returns
-------
out :
An expanded output array having the same data type as x.
Examples
--------
>>> x = NamedArray(("x", "y"), np.asarray([[1.0, 2.0], [3.0, 4.0]]))
>>> expand_dims(x)
<xarray.NamedArray (dim_2: 1, x: 2, y: 2)> Size: 32B
array([[[1., 2.],
[3., 4.]]])
>>> expand_dims(x, dim="z")
<xarray.NamedArray (z: 1, x: 2, y: 2)> Size: 32B
array([[[1., 2.],
[3., 4.]]])
"""
| /usr/src/app/target_test_cases/failed_tests_expand_dims.txt | def expand_dims(
x: NamedArray[Any, _DType],
/,
*,
dim: _Dim | Default = _default,
axis: _Axis = 0,
) -> NamedArray[Any, _DType]:
"""
Expands the shape of an array by inserting a new dimension of size one at the
position specified by dims.
Parameters
----------
x :
Array to expand.
dim :
Dimension name. New dimension will be stored in the axis position.
axis :
(Not recommended) Axis position (zero-based). Default is 0.
Returns
-------
out :
An expanded output array having the same data type as x.
Examples
--------
>>> x = NamedArray(("x", "y"), np.asarray([[1.0, 2.0], [3.0, 4.0]]))
>>> expand_dims(x)
<xarray.NamedArray (dim_2: 1, x: 2, y: 2)> Size: 32B
array([[[1., 2.],
[3., 4.]]])
>>> expand_dims(x, dim="z")
<xarray.NamedArray (z: 1, x: 2, y: 2)> Size: 32B
array([[[1., 2.],
[3., 4.]]])
"""
xp = _get_data_namespace(x)
dims = x.dims
if dim is _default:
dim = f"dim_{len(dims)}"
d = list(dims)
d.insert(axis, dim)
out = x._new(dims=tuple(d), data=xp.expand_dims(x._data, axis=axis))
return out
| expand_dims |
xarray | 42 | xarray/core/accessor_str.py | def format(
self,
*args: Any,
**kwargs: Any,
) -> T_DataArray:
"""
Perform python string formatting on each element of the DataArray.
This is equivalent to calling `str.format` on every element of the
DataArray. The replacement values can either be a string-like
scalar or array-like of string-like values. If array-like,
        the values will be broadcast and applied elementwise to the input
DataArray.
.. note::
Array-like values provided as `*args` will have their
dimensions added even if those arguments are not used in any
string formatting.
.. warning::
Array-like arguments are only applied elementwise for `*args`.
For `**kwargs`, values are used as-is.
Parameters
----------
*args : str or bytes or array-like of str or bytes
Values for positional formatting.
If array-like, the values are broadcast and applied elementwise.
The dimensions will be placed at the end of the output array dimensions
in the order they are provided.
**kwargs : str or bytes or array-like of str or bytes
Values for keyword-based formatting.
These are **not** broadcast or applied elementwise.
Returns
-------
formatted : same type as values
Examples
--------
Create an array to format.
>>> values = xr.DataArray(
... ["{} is {adj0}", "{} and {} are {adj1}"],
... dims=["X"],
... )
Set the values to fill.
>>> noun0 = xr.DataArray(
... ["spam", "egg"],
... dims=["Y"],
... )
>>> noun1 = xr.DataArray(
... ["lancelot", "arthur"],
... dims=["ZZ"],
... )
>>> adj0 = "unexpected"
>>> adj1 = "like a duck"
Insert the values into the array
>>> values.str.format(noun0, noun1, adj0=adj0, adj1=adj1)
<xarray.DataArray (X: 2, Y: 2, ZZ: 2)> Size: 1kB
array([[['spam is unexpected', 'spam is unexpected'],
['egg is unexpected', 'egg is unexpected']],
<BLANKLINE>
[['spam and lancelot are like a duck',
'spam and arthur are like a duck'],
['egg and lancelot are like a duck',
'egg and arthur are like a duck']]], dtype='<U33')
Dimensions without coordinates: X, Y, ZZ
See Also
--------
str.format
"""
| /usr/src/app/target_test_cases/failed_tests_format.txt | def format(
self,
*args: Any,
**kwargs: Any,
) -> T_DataArray:
"""
Perform python string formatting on each element of the DataArray.
This is equivalent to calling `str.format` on every element of the
DataArray. The replacement values can either be a string-like
scalar or array-like of string-like values. If array-like,
        the values will be broadcast and applied elementwise to the input
DataArray.
.. note::
Array-like values provided as `*args` will have their
dimensions added even if those arguments are not used in any
string formatting.
.. warning::
Array-like arguments are only applied elementwise for `*args`.
For `**kwargs`, values are used as-is.
Parameters
----------
*args : str or bytes or array-like of str or bytes
Values for positional formatting.
If array-like, the values are broadcast and applied elementwise.
The dimensions will be placed at the end of the output array dimensions
in the order they are provided.
**kwargs : str or bytes or array-like of str or bytes
Values for keyword-based formatting.
These are **not** broadcast or applied elementwise.
Returns
-------
formatted : same type as values
Examples
--------
Create an array to format.
>>> values = xr.DataArray(
... ["{} is {adj0}", "{} and {} are {adj1}"],
... dims=["X"],
... )
Set the values to fill.
>>> noun0 = xr.DataArray(
... ["spam", "egg"],
... dims=["Y"],
... )
>>> noun1 = xr.DataArray(
... ["lancelot", "arthur"],
... dims=["ZZ"],
... )
>>> adj0 = "unexpected"
>>> adj1 = "like a duck"
Insert the values into the array
>>> values.str.format(noun0, noun1, adj0=adj0, adj1=adj1)
<xarray.DataArray (X: 2, Y: 2, ZZ: 2)> Size: 1kB
array([[['spam is unexpected', 'spam is unexpected'],
['egg is unexpected', 'egg is unexpected']],
<BLANKLINE>
[['spam and lancelot are like a duck',
'spam and arthur are like a duck'],
['egg and lancelot are like a duck',
'egg and arthur are like a duck']]], dtype='<U33')
Dimensions without coordinates: X, Y, ZZ
See Also
--------
str.format
"""
args = tuple(self._stringify(x) for x in args)
kwargs = {key: self._stringify(val) for key, val in kwargs.items()}
func = lambda x, *args, **kwargs: self._obj.dtype.type.format(
x, *args, **kwargs
)
return self._apply(func=func, func_args=args, func_kwargs={"kwargs": kwargs})
| format |
xarray | 43 | xarray/core/missing.py | def get_clean_interp_index(
arr, dim: Hashable, use_coordinate: str | bool = True, strict: bool = True
):
"""Return index to use for x values in interpolation or curve fitting.
Parameters
----------
arr : DataArray
Array to interpolate or fit to a curve.
dim : str
Name of dimension along which to fit.
use_coordinate : str or bool
If use_coordinate is True, the coordinate that shares the name of the
dimension along which interpolation is being performed will be used as the
x values. If False, the x values are set as an equally spaced sequence.
strict : bool
Whether to raise errors if the index is either non-unique or non-monotonic (default).
Returns
-------
Variable
Numerical values for the x-coordinates.
Notes
-----
If indexing is along the time dimension, datetime coordinates are converted
to time deltas with respect to 1970-01-01.
"""
| /usr/src/app/target_test_cases/failed_tests_get_clean_interp_index.txt | def get_clean_interp_index(
arr, dim: Hashable, use_coordinate: str | bool = True, strict: bool = True
):
"""Return index to use for x values in interpolation or curve fitting.
Parameters
----------
arr : DataArray
Array to interpolate or fit to a curve.
dim : str
Name of dimension along which to fit.
use_coordinate : str or bool
If use_coordinate is True, the coordinate that shares the name of the
dimension along which interpolation is being performed will be used as the
x values. If False, the x values are set as an equally spaced sequence.
strict : bool
Whether to raise errors if the index is either non-unique or non-monotonic (default).
Returns
-------
Variable
Numerical values for the x-coordinates.
Notes
-----
If indexing is along the time dimension, datetime coordinates are converted
to time deltas with respect to 1970-01-01.
"""
# Question: If use_coordinate is a string, what role does `dim` play?
from xarray.coding.cftimeindex import CFTimeIndex
if use_coordinate is False:
axis = arr.get_axis_num(dim)
return np.arange(arr.shape[axis], dtype=np.float64)
if use_coordinate is True:
index = arr.get_index(dim)
else: # string
index = arr.coords[use_coordinate]
if index.ndim != 1:
raise ValueError(
f"Coordinates used for interpolation must be 1D, "
f"{use_coordinate} is {index.ndim}D."
)
index = index.to_index()
# TODO: index.name is None for multiindexes
# set name for nice error messages below
if isinstance(index, pd.MultiIndex):
index.name = dim
if strict:
if not index.is_monotonic_increasing:
raise ValueError(f"Index {index.name!r} must be monotonically increasing")
if not index.is_unique:
raise ValueError(f"Index {index.name!r} has duplicate values")
# Special case for non-standard calendar indexes
# Numerical datetime values are defined with respect to 1970-01-01T00:00:00 in units of nanoseconds
if isinstance(index, CFTimeIndex | pd.DatetimeIndex):
offset = type(index[0])(1970, 1, 1)
if isinstance(index, CFTimeIndex):
index = index.values
index = Variable(
data=datetime_to_numeric(index, offset=offset, datetime_unit="ns"),
dims=(dim,),
)
# raise if index cannot be cast to a float (e.g. MultiIndex)
try:
index = index.values.astype(np.float64)
except (TypeError, ValueError) as err:
# pandas raises a TypeError
# xarray/numpy raise a ValueError
raise TypeError(
f"Index {index.name!r} must be castable to float64 to support "
f"interpolation or curve fitting, got {type(index).__name__}."
) from err
return index
| get_clean_interp_index |
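A minimal sketch with a plain numeric coordinate, imported from the module path given in the row:
import numpy as np
import xarray as xr
from xarray.core.missing import get_clean_interp_index
da = xr.DataArray(np.arange(4.0), dims="x", coords={"x": [0, 1, 2, 4]})
idx = get_clean_interp_index(da, "x")
# idx is a float64 array [0., 1., 2., 4.] usable as x-values for fitting.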
xarray | 44 | xarray/core/dataset.py | def head(
self,
indexers: Mapping[Any, int] | int | None = None,
**indexers_kwargs: Any,
) -> Self:
"""Returns a new dataset with the first `n` values of each array
for the specified dimension(s).
Parameters
----------
indexers : dict or int, default: 5
A dict with keys matching dimensions and integer values `n`
or a single integer `n` applied over all dimensions.
One of indexers or indexers_kwargs must be provided.
**indexers_kwargs : {dim: n, ...}, optional
The keyword arguments form of ``indexers``.
One of indexers or indexers_kwargs must be provided.
Examples
--------
>>> dates = pd.date_range(start="2023-01-01", periods=5)
>>> pageviews = [1200, 1500, 900, 1800, 2000]
>>> visitors = [800, 1000, 600, 1200, 1500]
>>> dataset = xr.Dataset(
... {
... "pageviews": (("date"), pageviews),
... "visitors": (("date"), visitors),
... },
... coords={"date": dates},
... )
>>> busiest_days = dataset.sortby("pageviews", ascending=False)
>>> busiest_days.head()
<xarray.Dataset> Size: 120B
Dimensions: (date: 5)
Coordinates:
* date (date) datetime64[ns] 40B 2023-01-05 2023-01-04 ... 2023-01-03
Data variables:
pageviews (date) int64 40B 2000 1800 1500 1200 900
visitors (date) int64 40B 1500 1200 1000 800 600
# Retrieve the 3 busiest days in terms of pageviews
>>> busiest_days.head(3)
<xarray.Dataset> Size: 72B
Dimensions: (date: 3)
Coordinates:
* date (date) datetime64[ns] 24B 2023-01-05 2023-01-04 2023-01-02
Data variables:
pageviews (date) int64 24B 2000 1800 1500
visitors (date) int64 24B 1500 1200 1000
# Using a dictionary to specify the number of elements for specific dimensions
>>> busiest_days.head({"date": 3})
<xarray.Dataset> Size: 72B
Dimensions: (date: 3)
Coordinates:
* date (date) datetime64[ns] 24B 2023-01-05 2023-01-04 2023-01-02
Data variables:
pageviews (date) int64 24B 2000 1800 1500
visitors (date) int64 24B 1500 1200 1000
See Also
--------
Dataset.tail
Dataset.thin
DataArray.head
"""
| /usr/src/app/target_test_cases/failed_tests_head.txt | def head(
self,
indexers: Mapping[Any, int] | int | None = None,
**indexers_kwargs: Any,
) -> Self:
"""Returns a new dataset with the first `n` values of each array
for the specified dimension(s).
Parameters
----------
indexers : dict or int, default: 5
A dict with keys matching dimensions and integer values `n`
or a single integer `n` applied over all dimensions.
One of indexers or indexers_kwargs must be provided.
**indexers_kwargs : {dim: n, ...}, optional
The keyword arguments form of ``indexers``.
One of indexers or indexers_kwargs must be provided.
Examples
--------
>>> dates = pd.date_range(start="2023-01-01", periods=5)
>>> pageviews = [1200, 1500, 900, 1800, 2000]
>>> visitors = [800, 1000, 600, 1200, 1500]
>>> dataset = xr.Dataset(
... {
... "pageviews": (("date"), pageviews),
... "visitors": (("date"), visitors),
... },
... coords={"date": dates},
... )
>>> busiest_days = dataset.sortby("pageviews", ascending=False)
>>> busiest_days.head()
<xarray.Dataset> Size: 120B
Dimensions: (date: 5)
Coordinates:
* date (date) datetime64[ns] 40B 2023-01-05 2023-01-04 ... 2023-01-03
Data variables:
pageviews (date) int64 40B 2000 1800 1500 1200 900
visitors (date) int64 40B 1500 1200 1000 800 600
# Retrieve the 3 busiest days in terms of pageviews
>>> busiest_days.head(3)
<xarray.Dataset> Size: 72B
Dimensions: (date: 3)
Coordinates:
* date (date) datetime64[ns] 24B 2023-01-05 2023-01-04 2023-01-02
Data variables:
pageviews (date) int64 24B 2000 1800 1500
visitors (date) int64 24B 1500 1200 1000
# Using a dictionary to specify the number of elements for specific dimensions
>>> busiest_days.head({"date": 3})
<xarray.Dataset> Size: 72B
Dimensions: (date: 3)
Coordinates:
* date (date) datetime64[ns] 24B 2023-01-05 2023-01-04 2023-01-02
Data variables:
pageviews (date) int64 24B 2000 1800 1500
visitors (date) int64 24B 1500 1200 1000
See Also
--------
Dataset.tail
Dataset.thin
DataArray.head
"""
if not indexers_kwargs:
if indexers is None:
indexers = 5
if not isinstance(indexers, int) and not is_dict_like(indexers):
raise TypeError("indexers must be either dict-like or a single integer")
if isinstance(indexers, int):
indexers = {dim: indexers for dim in self.dims}
indexers = either_dict_or_kwargs(indexers, indexers_kwargs, "head")
for k, v in indexers.items():
if not isinstance(v, int):
raise TypeError(
"expected integer type indexer for "
f"dimension {k!r}, found {type(v)!r}"
)
elif v < 0:
raise ValueError(
"expected positive integer as indexer "
f"for dimension {k!r}, found {v}"
)
indexers_slices = {k: slice(val) for k, val in indexers.items()}
return self.isel(indexers_slices)
| head |
xarray | 45 | xarray/plot/dataarray_plot.py | def hist(
darray: DataArray,
*args: Any,
figsize: Iterable[float] | None = None,
size: float | None = None,
aspect: AspectOptions = None,
ax: Axes | None = None,
xincrease: bool | None = None,
yincrease: bool | None = None,
xscale: ScaleOptions = None,
yscale: ScaleOptions = None,
xticks: ArrayLike | None = None,
yticks: ArrayLike | None = None,
xlim: tuple[float, float] | None = None,
ylim: tuple[float, float] | None = None,
**kwargs: Any,
) -> tuple[np.ndarray, np.ndarray, BarContainer | Polygon]:
"""
Histogram of DataArray.
Wraps :py:func:`matplotlib:matplotlib.pyplot.hist`.
Plots *N*-dimensional arrays by first flattening the array.
Parameters
----------
darray : DataArray
Can have any number of dimensions.
figsize : Iterable of float, optional
A tuple (width, height) of the figure in inches.
Mutually exclusive with ``size`` and ``ax``.
aspect : "auto", "equal", scalar or None, optional
Aspect ratio of plot, so that ``aspect * size`` gives the *width* in
inches. Only used if a ``size`` is provided.
size : scalar, optional
If provided, create a new figure for the plot with the given size:
*height* (in inches) of each plot. See also: ``aspect``.
ax : matplotlib axes object, optional
Axes on which to plot. By default, use the current axes.
Mutually exclusive with ``size`` and ``figsize``.
xincrease : bool or None, optional
Should the values on the *x* axis be increasing from left to right?
If ``None``, use the default for the Matplotlib function.
yincrease : bool or None, optional
Should the values on the *y* axis be increasing from top to bottom?
If ``None``, use the default for the Matplotlib function.
xscale, yscale : {'linear', 'symlog', 'log', 'logit'}, optional
Specifies scaling for the *x*- and *y*-axis, respectively.
xticks, yticks : array-like, optional
Specify tick locations for *x*- and *y*-axis.
xlim, ylim : tuple[float, float], optional
Specify *x*- and *y*-axis limits.
**kwargs : optional
Additional keyword arguments to :py:func:`matplotlib:matplotlib.pyplot.hist`.
"""
| /usr/src/app/target_test_cases/failed_tests_hist.txt | def hist(
darray: DataArray,
*args: Any,
figsize: Iterable[float] | None = None,
size: float | None = None,
aspect: AspectOptions = None,
ax: Axes | None = None,
xincrease: bool | None = None,
yincrease: bool | None = None,
xscale: ScaleOptions = None,
yscale: ScaleOptions = None,
xticks: ArrayLike | None = None,
yticks: ArrayLike | None = None,
xlim: tuple[float, float] | None = None,
ylim: tuple[float, float] | None = None,
**kwargs: Any,
) -> tuple[np.ndarray, np.ndarray, BarContainer | Polygon]:
"""
Histogram of DataArray.
Wraps :py:func:`matplotlib:matplotlib.pyplot.hist`.
Plots *N*-dimensional arrays by first flattening the array.
Parameters
----------
darray : DataArray
Can have any number of dimensions.
figsize : Iterable of float, optional
A tuple (width, height) of the figure in inches.
Mutually exclusive with ``size`` and ``ax``.
aspect : "auto", "equal", scalar or None, optional
Aspect ratio of plot, so that ``aspect * size`` gives the *width* in
inches. Only used if a ``size`` is provided.
size : scalar, optional
If provided, create a new figure for the plot with the given size:
*height* (in inches) of each plot. See also: ``aspect``.
ax : matplotlib axes object, optional
Axes on which to plot. By default, use the current axes.
Mutually exclusive with ``size`` and ``figsize``.
xincrease : bool or None, optional
Should the values on the *x* axis be increasing from left to right?
If ``None``, use the default for the Matplotlib function.
yincrease : bool or None, optional
Should the values on the *y* axis be increasing from top to bottom?
If ``None``, use the default for the Matplotlib function.
xscale, yscale : {'linear', 'symlog', 'log', 'logit'}, optional
Specifies scaling for the *x*- and *y*-axis, respectively.
xticks, yticks : array-like, optional
Specify tick locations for *x*- and *y*-axis.
xlim, ylim : tuple[float, float], optional
Specify *x*- and *y*-axis limits.
**kwargs : optional
Additional keyword arguments to :py:func:`matplotlib:matplotlib.pyplot.hist`.
"""
assert len(args) == 0
if darray.ndim == 0 or darray.size == 0:
# TypeError to be consistent with pandas
raise TypeError("No numeric data to plot.")
ax = get_axis(figsize, size, aspect, ax)
no_nan = np.ravel(darray.to_numpy())
no_nan = no_nan[pd.notnull(no_nan)]
n, bins, patches = cast(
tuple[np.ndarray, np.ndarray, Union["BarContainer", "Polygon"]],
ax.hist(no_nan, **kwargs),
)
ax.set_title(darray._title_for_slice())
ax.set_xlabel(label_from_attrs(darray))
_update_axes(ax, xincrease, yincrease, xscale, yscale, xticks, yticks, xlim, ylim)
return n, bins, patches
| hist |
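A minimal usage sketch: this wrapper is normally reached through the DataArray plotting accessor, ``DataArray.plot.hist`` (matplotlib assumed installed).
import numpy as np
import xarray as xr
da = xr.DataArray(np.random.randn(2, 3, 4), name="temperature")
# The N-d array is flattened, NaNs are dropped, then matplotlib's hist is called;
# extra keyword arguments such as ``bins`` are forwarded to it.
n, bins, patches = da.plot.hist(bins=10)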
xarray | 46 | xarray/core/dataset.py | def identical(self, other: Self) -> bool:
"""Like equals, but also checks all dataset attributes and the
attributes on all variables and coordinates.
Example
-------
>>> a = xr.Dataset(
... {"Width": ("X", [1, 2, 3])},
... coords={"X": [1, 2, 3]},
... attrs={"units": "m"},
... )
>>> b = xr.Dataset(
... {"Width": ("X", [1, 2, 3])},
... coords={"X": [1, 2, 3]},
... attrs={"units": "m"},
... )
>>> c = xr.Dataset(
... {"Width": ("X", [1, 2, 3])},
... coords={"X": [1, 2, 3]},
... attrs={"units": "ft"},
... )
>>> a
<xarray.Dataset> Size: 48B
Dimensions: (X: 3)
Coordinates:
* X (X) int64 24B 1 2 3
Data variables:
Width (X) int64 24B 1 2 3
Attributes:
units: m
>>> b
<xarray.Dataset> Size: 48B
Dimensions: (X: 3)
Coordinates:
* X (X) int64 24B 1 2 3
Data variables:
Width (X) int64 24B 1 2 3
Attributes:
units: m
>>> c
<xarray.Dataset> Size: 48B
Dimensions: (X: 3)
Coordinates:
* X (X) int64 24B 1 2 3
Data variables:
Width (X) int64 24B 1 2 3
Attributes:
units: ft
>>> a.equals(b)
True
>>> a.identical(b)
True
>>> a.equals(c)
True
>>> a.identical(c)
False
See Also
--------
Dataset.broadcast_equals
Dataset.equals
"""
| /usr/src/app/target_test_cases/failed_tests_identical.txt | def identical(self, other: Self) -> bool:
"""Like equals, but also checks all dataset attributes and the
attributes on all variables and coordinates.
Example
-------
>>> a = xr.Dataset(
... {"Width": ("X", [1, 2, 3])},
... coords={"X": [1, 2, 3]},
... attrs={"units": "m"},
... )
>>> b = xr.Dataset(
... {"Width": ("X", [1, 2, 3])},
... coords={"X": [1, 2, 3]},
... attrs={"units": "m"},
... )
>>> c = xr.Dataset(
... {"Width": ("X", [1, 2, 3])},
... coords={"X": [1, 2, 3]},
... attrs={"units": "ft"},
... )
>>> a
<xarray.Dataset> Size: 48B
Dimensions: (X: 3)
Coordinates:
* X (X) int64 24B 1 2 3
Data variables:
Width (X) int64 24B 1 2 3
Attributes:
units: m
>>> b
<xarray.Dataset> Size: 48B
Dimensions: (X: 3)
Coordinates:
* X (X) int64 24B 1 2 3
Data variables:
Width (X) int64 24B 1 2 3
Attributes:
units: m
>>> c
<xarray.Dataset> Size: 48B
Dimensions: (X: 3)
Coordinates:
* X (X) int64 24B 1 2 3
Data variables:
Width (X) int64 24B 1 2 3
Attributes:
units: ft
>>> a.equals(b)
True
>>> a.identical(b)
True
>>> a.equals(c)
True
>>> a.identical(c)
False
See Also
--------
Dataset.broadcast_equals
Dataset.equals
"""
try:
return utils.dict_equiv(self.attrs, other.attrs) and self._all_compat(
other, "identical"
)
except (TypeError, AttributeError):
return False
| identical |
xarray | 47 | xarray/coding/frequencies.py | def infer_freq(index):
"""
Infer the most likely frequency given the input index.
Parameters
----------
index : CFTimeIndex, DataArray, DatetimeIndex, TimedeltaIndex, Series
If not passed a CFTimeIndex, this simply calls `pandas.infer_freq`.
If passed a Series or a DataArray, the values of the series are used (NOT THE INDEX).
Returns
-------
str or None
None if no discernible frequency.
Raises
------
TypeError
If the index is not datetime-like.
ValueError
If there are fewer than three values or the index is not 1D.
"""
| /usr/src/app/target_test_cases/failed_tests_infer_freq.txt | def infer_freq(index):
"""
Infer the most likely frequency given the input index.
Parameters
----------
index : CFTimeIndex, DataArray, DatetimeIndex, TimedeltaIndex, Series
If not passed a CFTimeIndex, this simply calls `pandas.infer_freq`.
If passed a Series or a DataArray, the values of the series are used (NOT THE INDEX).
Returns
-------
str or None
None if no discernible frequency.
Raises
------
TypeError
If the index is not datetime-like.
ValueError
If there are fewer than three values or the index is not 1D.
"""
from xarray.core.dataarray import DataArray
from xarray.core.variable import Variable
if isinstance(index, DataArray | pd.Series):
if index.ndim != 1:
raise ValueError("'index' must be 1D")
elif not _contains_datetime_like_objects(Variable("dim", index)):
raise ValueError("'index' must contain datetime-like objects")
dtype = np.asarray(index).dtype
if dtype == "datetime64[ns]":
index = pd.DatetimeIndex(index.values)
elif dtype == "timedelta64[ns]":
index = pd.TimedeltaIndex(index.values)
else:
index = CFTimeIndex(index.values)
if isinstance(index, CFTimeIndex):
inferer = _CFTimeFrequencyInferer(index)
return inferer.get_freq()
return _legacy_to_new_freq(pd.infer_freq(index))
| infer_freq |
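A minimal usage sketch of the public ``xr.infer_freq`` wrapper; the frequency strings in the comments are what a recent xarray is expected to return.
import pandas as pd
import xarray as xr
# CFTimeIndex input is handled by the CFTime frequency inferer:
xr.infer_freq(xr.cftime_range("2000-01-01", periods=4, freq="6h"))  # e.g. "6h"
# For a DataArray of datetimes, the *values* are used, not the index:
da = xr.DataArray(pd.date_range("2000-01-01", periods=3, freq="D"), dims="time")
xr.infer_freq(da)  # e.g. "D"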
xarray | 48 | xarray/coding/calendar_ops.py | def interp_calendar(source, target, dim="time"):
"""Interpolates a DataArray or Dataset indexed by a time coordinate to
another calendar based on decimal year measure.
Each timestamp in `source` and `target` is first converted to its decimal
year equivalent; then `source` is interpolated on the target coordinate.
The decimal year of a timestamp is its year plus its sub-year component
converted to the fraction of its year. For example "2000-03-01 12:00" is
2000.1653 in a standard calendar or 2000.16301 in a `"noleap"` calendar.
This method should only be used when the time (HH:MM:SS) information of
the time coordinate is not important.
Parameters
----------
source: DataArray or Dataset
The source data to interpolate; must have a time coordinate of a valid
dtype (:py:class:`numpy.datetime64` or :py:class:`cftime.datetime` objects)
target: DataArray, DatetimeIndex, or CFTimeIndex
The target time coordinate of a valid dtype (np.datetime64 or cftime objects)
dim : str
The time coordinate name.
Return
------
DataArray or Dataset
The source interpolated on the decimal years of target.
"""
| /usr/src/app/target_test_cases/failed_tests_interp_calendar.txt | def interp_calendar(source, target, dim="time"):
"""Interpolates a DataArray or Dataset indexed by a time coordinate to
another calendar based on decimal year measure.
Each timestamp in `source` and `target` is first converted to its decimal
year equivalent; then `source` is interpolated on the target coordinate.
The decimal year of a timestamp is its year plus its sub-year component
converted to the fraction of its year. For example "2000-03-01 12:00" is
2000.1653 in a standard calendar or 2000.16301 in a `"noleap"` calendar.
This method should only be used when the time (HH:MM:SS) information of
the time coordinate is not important.
Parameters
----------
source: DataArray or Dataset
The source data to interpolate; must have a time coordinate of a valid
dtype (:py:class:`numpy.datetime64` or :py:class:`cftime.datetime` objects)
target: DataArray, DatetimeIndex, or CFTimeIndex
The target time coordinate of a valid dtype (np.datetime64 or cftime objects)
dim : str
The time coordinate name.
Return
------
DataArray or Dataset
The source interpolated on the decimal years of target.
"""
from xarray.core.dataarray import DataArray
if isinstance(target, pd.DatetimeIndex | CFTimeIndex):
target = DataArray(target, dims=(dim,), name=dim)
if not _contains_datetime_like_objects(
source[dim].variable
) or not _contains_datetime_like_objects(target.variable):
raise ValueError(
f"Both 'source.{dim}' and 'target' must contain datetime objects."
)
target_calendar = target.dt.calendar
if (
source[dim].time.dt.year == 0
).any() and target_calendar in _CALENDARS_WITHOUT_YEAR_ZERO:
raise ValueError(
f"Source time coordinate contains dates with year 0, which is not supported by target calendar {target_calendar}."
)
out = source.copy()
out[dim] = _decimal_year(source[dim])
target_idx = _decimal_year(target)
out = out.interp(**{dim: target_idx})
out[dim] = target
return out
| interp_calendar |
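A minimal usage sketch: ``interp_calendar`` is also exposed as ``xr.interp_calendar`` and as a DataArray/Dataset method. This example assumes cftime and scipy are installed.
import numpy as np
import pandas as pd
import xarray as xr
# Monthly data on the standard (numpy datetime64) calendar...
src_time = pd.date_range("2000-01-01", periods=12, freq="MS")
da = xr.DataArray(np.arange(12.0), dims="time", coords={"time": src_time})
# ...re-indexed onto a "noleap" cftime axis by interpolating over decimal years.
target = xr.cftime_range("2000-01-01", periods=12, freq="MS", calendar="noleap")
out = da.interp_calendar(target)
# out has the same length as the target and carries the noleap dates as its time coordinate.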
xarray | 49 | xarray/core/missing.py | def interp_func(var, x, new_x, method: InterpOptions, kwargs):
"""
Multi-dimensional interpolation for array-like data. Interpolated axes should be
located in the last position.
Parameters
----------
var : np.ndarray or dask.array.Array
Array to be interpolated. The final dimension is interpolated.
x : a list of 1d arrays
Original coordinates. Should not contain NaN.
new_x : a list of 1d arrays
New coordinates. Should not contain NaN.
method : string
{'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic', 'pchip', 'akima',
'makima', 'barycentric', 'krogh'} for 1-dimensional interpolation.
{'linear', 'nearest'} for multidimensional interpolation
**kwargs
Optional keyword arguments to be passed to scipy.interpolator
Returns
-------
interpolated: array
Interpolated array
Notes
-----
This requires scipy installed.
See Also
--------
scipy.interpolate.interp1d
"""
| /usr/src/app/target_test_cases/failed_tests_interp_func.txt | def interp_func(var, x, new_x, method: InterpOptions, kwargs):
"""
Multi-dimensional interpolation for array-like data. Interpolated axes should be
located in the last position.
Parameters
----------
var : np.ndarray or dask.array.Array
Array to be interpolated. The final dimension is interpolated.
x : a list of 1d arrays
Original coordinates. Should not contain NaN.
new_x : a list of 1d arrays
New coordinates. Should not contain NaN.
method : string
{'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic', 'pchip', 'akima',
'makima', 'barycentric', 'krogh'} for 1-dimensional interpolation.
{'linear', 'nearest'} for multidimensional interpolation
**kwargs
Optional keyword arguments to be passed to scipy.interpolator
Returns
-------
interpolated: array
Interpolated array
Notes
-----
This requires scipy installed.
See Also
--------
scipy.interpolate.interp1d
"""
if not x:
return var.copy()
if len(x) == 1:
func, kwargs = _get_interpolator(method, vectorizeable_only=True, **kwargs)
else:
func, kwargs = _get_interpolator_nd(method, **kwargs)
if is_chunked_array(var):
chunkmanager = get_chunked_array_type(var)
ndim = var.ndim
nconst = ndim - len(x)
out_ind = list(range(nconst)) + list(range(ndim, ndim + new_x[0].ndim))
# blockwise args format
x_arginds = [[_x, (nconst + index,)] for index, _x in enumerate(x)]
x_arginds = [item for pair in x_arginds for item in pair]
new_x_arginds = [
[_x, [ndim + index for index in range(_x.ndim)]] for _x in new_x
]
new_x_arginds = [item for pair in new_x_arginds for item in pair]
args = (var, range(ndim), *x_arginds, *new_x_arginds)
_, rechunked = chunkmanager.unify_chunks(*args)
args = tuple(
elem for pair in zip(rechunked, args[1::2], strict=True) for elem in pair
)
new_x = rechunked[1 + (len(rechunked) - 1) // 2 :]
new_x0_chunks = new_x[0].chunks
new_x0_shape = new_x[0].shape
new_x0_chunks_is_not_none = new_x0_chunks is not None
new_axes = {
ndim + i: new_x0_chunks[i] if new_x0_chunks_is_not_none else new_x0_shape[i]
for i in range(new_x[0].ndim)
}
# if useful, reuse localize for each chunk of new_x
localize = (method in ["linear", "nearest"]) and new_x0_chunks_is_not_none
# scipy.interpolate.interp1d always forces to float.
# Use the same check for blockwise as well:
if not issubclass(var.dtype.type, np.inexact):
dtype = float
else:
dtype = var.dtype
meta = var._meta
return chunkmanager.blockwise(
_chunked_aware_interpnd,
out_ind,
*args,
interp_func=func,
interp_kwargs=kwargs,
localize=localize,
concatenate=True,
dtype=dtype,
new_axes=new_axes,
meta=meta,
align_arrays=False,
)
return _interpnd(var, x, new_x, func, kwargs)
| interp_func |
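``interp_func`` itself is internal; users normally reach it through ``DataArray.interp`` / ``Dataset.interp``. A minimal sketch of that public path (scipy required; the chunked variant additionally assumes dask):
import numpy as np
import xarray as xr
x = np.linspace(0, 2 * np.pi, 10)
da = xr.DataArray(np.sin(x), dims="x", coords={"x": x})
# 1-D interpolation dispatches to a scipy 1-D interpolator under the hood.
fine = da.interp(x=np.linspace(0, 2 * np.pi, 50), method="cubic")
# A dask-backed array (chunked along a non-interpolated dimension) goes through
# the blockwise code path shown in the implementation above.
da2 = xr.DataArray(
    np.arange(1.0, 4.0)[:, None] * np.sin(x)[None, :],
    dims=("y", "x"),
    coords={"x": x},
)
fine_lazy = da2.chunk({"y": 1}).interp(x=np.linspace(0, 2 * np.pi, 50))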
xarray | 50 | xarray/core/dataset.py | def isel(
self,
indexers: Mapping[Any, Any] | None = None,
drop: bool = False,
missing_dims: ErrorOptionsWithWarn = "raise",
**indexers_kwargs: Any,
) -> Self:
"""Returns a new dataset with each array indexed along the specified
dimension(s).
This method selects values from each array using its `__getitem__`
method, except this method does not require knowing the order of
each array's dimensions.
Parameters
----------
indexers : dict, optional
A dict with keys matching dimensions and values given
by integers, slice objects or arrays.
indexer can be an integer, slice, array-like or DataArray.
If DataArrays are passed as indexers, xarray-style indexing will be
carried out. See :ref:`indexing` for the details.
One of indexers or indexers_kwargs must be provided.
drop : bool, default: False
If ``drop=True``, drop coordinate variables indexed by integers
instead of making them scalar.
missing_dims : {"raise", "warn", "ignore"}, default: "raise"
What to do if dimensions that should be selected from are not present in the
Dataset:
- "raise": raise an exception
- "warn": raise a warning, and ignore the missing dimensions
- "ignore": ignore the missing dimensions
**indexers_kwargs : {dim: indexer, ...}, optional
The keyword arguments form of ``indexers``.
One of indexers or indexers_kwargs must be provided.
Returns
-------
obj : Dataset
A new Dataset with the same contents as this dataset, except each
array and dimension is indexed by the appropriate indexers.
If indexer DataArrays have coordinates that do not conflict with
this object, then these coordinates will be attached.
In general, each array's data will be a view of the array's data
in this dataset, unless vectorized indexing was triggered by using
an array indexer, in which case the data will be a copy.
Examples
--------
>>> dataset = xr.Dataset(
... {
... "math_scores": (
... ["student", "test"],
... [[90, 85, 92], [78, 80, 85], [95, 92, 98]],
... ),
... "english_scores": (
... ["student", "test"],
... [[88, 90, 92], [75, 82, 79], [93, 96, 91]],
... ),
... },
... coords={
... "student": ["Alice", "Bob", "Charlie"],
... "test": ["Test 1", "Test 2", "Test 3"],
... },
... )
# A specific element from the dataset is selected
>>> dataset.isel(student=1, test=0)
<xarray.Dataset> Size: 68B
Dimensions: ()
Coordinates:
student <U7 28B 'Bob'
test <U6 24B 'Test 1'
Data variables:
math_scores int64 8B 78
english_scores int64 8B 75
# Indexing with a slice using isel
>>> slice_of_data = dataset.isel(student=slice(0, 2), test=slice(0, 2))
>>> slice_of_data
<xarray.Dataset> Size: 168B
Dimensions: (student: 2, test: 2)
Coordinates:
* student (student) <U7 56B 'Alice' 'Bob'
* test (test) <U6 48B 'Test 1' 'Test 2'
Data variables:
math_scores (student, test) int64 32B 90 85 78 80
english_scores (student, test) int64 32B 88 90 75 82
>>> index_array = xr.DataArray([0, 2], dims="student")
>>> indexed_data = dataset.isel(student=index_array)
>>> indexed_data
<xarray.Dataset> Size: 224B
Dimensions: (student: 2, test: 3)
Coordinates:
* student (student) <U7 56B 'Alice' 'Charlie'
* test (test) <U6 72B 'Test 1' 'Test 2' 'Test 3'
Data variables:
math_scores (student, test) int64 48B 90 85 92 95 92 98
english_scores (student, test) int64 48B 88 90 92 93 96 91
See Also
--------
Dataset.sel
DataArray.isel
:doc:`xarray-tutorial:intermediate/indexing/indexing`
Tutorial material on indexing with Xarray objects
:doc:`xarray-tutorial:fundamentals/02.1_indexing_Basic`
Tutorial material on basics of indexing
"""
| /usr/src/app/target_test_cases/failed_tests_isel.txt | def isel(
self,
indexers: Mapping[Any, Any] | None = None,
drop: bool = False,
missing_dims: ErrorOptionsWithWarn = "raise",
**indexers_kwargs: Any,
) -> Self:
"""Returns a new dataset with each array indexed along the specified
dimension(s).
This method selects values from each array using its `__getitem__`
method, except this method does not require knowing the order of
each array's dimensions.
Parameters
----------
indexers : dict, optional
A dict with keys matching dimensions and values given
by integers, slice objects or arrays.
indexer can be an integer, slice, array-like or DataArray.
If DataArrays are passed as indexers, xarray-style indexing will be
carried out. See :ref:`indexing` for the details.
One of indexers or indexers_kwargs must be provided.
drop : bool, default: False
If ``drop=True``, drop coordinate variables indexed by integers
instead of making them scalar.
missing_dims : {"raise", "warn", "ignore"}, default: "raise"
What to do if dimensions that should be selected from are not present in the
Dataset:
- "raise": raise an exception
- "warn": raise a warning, and ignore the missing dimensions
- "ignore": ignore the missing dimensions
**indexers_kwargs : {dim: indexer, ...}, optional
The keyword arguments form of ``indexers``.
One of indexers or indexers_kwargs must be provided.
Returns
-------
obj : Dataset
A new Dataset with the same contents as this dataset, except each
array and dimension is indexed by the appropriate indexers.
If indexer DataArrays have coordinates that do not conflict with
this object, then these coordinates will be attached.
In general, each array's data will be a view of the array's data
in this dataset, unless vectorized indexing was triggered by using
an array indexer, in which case the data will be a copy.
Examples
--------
>>> dataset = xr.Dataset(
... {
... "math_scores": (
... ["student", "test"],
... [[90, 85, 92], [78, 80, 85], [95, 92, 98]],
... ),
... "english_scores": (
... ["student", "test"],
... [[88, 90, 92], [75, 82, 79], [93, 96, 91]],
... ),
... },
... coords={
... "student": ["Alice", "Bob", "Charlie"],
... "test": ["Test 1", "Test 2", "Test 3"],
... },
... )
# A specific element from the dataset is selected
>>> dataset.isel(student=1, test=0)
<xarray.Dataset> Size: 68B
Dimensions: ()
Coordinates:
student <U7 28B 'Bob'
test <U6 24B 'Test 1'
Data variables:
math_scores int64 8B 78
english_scores int64 8B 75
# Indexing with a slice using isel
>>> slice_of_data = dataset.isel(student=slice(0, 2), test=slice(0, 2))
>>> slice_of_data
<xarray.Dataset> Size: 168B
Dimensions: (student: 2, test: 2)
Coordinates:
* student (student) <U7 56B 'Alice' 'Bob'
* test (test) <U6 48B 'Test 1' 'Test 2'
Data variables:
math_scores (student, test) int64 32B 90 85 78 80
english_scores (student, test) int64 32B 88 90 75 82
>>> index_array = xr.DataArray([0, 2], dims="student")
>>> indexed_data = dataset.isel(student=index_array)
>>> indexed_data
<xarray.Dataset> Size: 224B
Dimensions: (student: 2, test: 3)
Coordinates:
* student (student) <U7 56B 'Alice' 'Charlie'
* test (test) <U6 72B 'Test 1' 'Test 2' 'Test 3'
Data variables:
math_scores (student, test) int64 48B 90 85 92 95 92 98
english_scores (student, test) int64 48B 88 90 92 93 96 91
See Also
--------
Dataset.sel
DataArray.isel
:doc:`xarray-tutorial:intermediate/indexing/indexing`
Tutorial material on indexing with Xarray objects
:doc:`xarray-tutorial:fundamentals/02.1_indexing_Basic`
Tutorial material on basics of indexing
"""
indexers = either_dict_or_kwargs(indexers, indexers_kwargs, "isel")
if any(is_fancy_indexer(idx) for idx in indexers.values()):
return self._isel_fancy(indexers, drop=drop, missing_dims=missing_dims)
# Much faster algorithm for when all indexers are ints, slices, one-dimensional
# lists, or zero or one-dimensional np.ndarray's
indexers = drop_dims_from_indexers(indexers, self.dims, missing_dims)
variables = {}
dims: dict[Hashable, int] = {}
coord_names = self._coord_names.copy()
indexes, index_variables = isel_indexes(self.xindexes, indexers)
for name, var in self._variables.items():
# preserve variable order
if name in index_variables:
var = index_variables[name]
else:
var_indexers = {k: v for k, v in indexers.items() if k in var.dims}
if var_indexers:
var = var.isel(var_indexers)
if drop and var.ndim == 0 and name in coord_names:
coord_names.remove(name)
continue
variables[name] = var
dims.update(zip(var.dims, var.shape, strict=True))
return self._construct_direct(
variables=variables,
coord_names=coord_names,
dims=dims,
attrs=self._attrs,
indexes=indexes,
encoding=self._encoding,
close=self._close,
)
| isel |
xarray | 51 | xarray/core/accessor_str.py | def join(
self,
dim: Hashable = None,
sep: str | bytes | Any = "",
) -> T_DataArray:
"""
Concatenate strings in a DataArray along a particular dimension.
An optional separator `sep` can also be specified. If `sep` is
array-like, it is broadcast against the array and applied elementwise.
Parameters
----------
dim : hashable, optional
Dimension along which the strings should be concatenated.
Only one dimension is allowed at a time.
Optional for 0D or 1D DataArrays, required for multidimensional DataArrays.
sep : str or array-like, default: "".
Separator to use between strings.
It is broadcast in the same way as the other input strings.
If array-like, its dimensions will be placed at the end of the output array dimensions.
Returns
-------
joined : same type as values
Examples
--------
Create an array
>>> values = xr.DataArray(
... [["a", "bab", "abc"], ["abcd", "", "abcdef"]],
... dims=["X", "Y"],
... )
Determine the separator
>>> seps = xr.DataArray(
... ["-", "_"],
... dims=["ZZ"],
... )
Join the strings along a given dimension
>>> values.str.join(dim="Y", sep=seps)
<xarray.DataArray (X: 2, ZZ: 2)> Size: 192B
array([['a-bab-abc', 'a_bab_abc'],
['abcd--abcdef', 'abcd__abcdef']], dtype='<U12')
Dimensions without coordinates: X, ZZ
See Also
--------
pandas.Series.str.join
str.join
"""
| /usr/src/app/target_test_cases/failed_tests_join.txt | def join(
self,
dim: Hashable = None,
sep: str | bytes | Any = "",
) -> T_DataArray:
"""
Concatenate strings in a DataArray along a particular dimension.
An optional separator `sep` can also be specified. If `sep` is
array-like, it is broadcast against the array and applied elementwise.
Parameters
----------
dim : hashable, optional
Dimension along which the strings should be concatenated.
Only one dimension is allowed at a time.
Optional for 0D or 1D DataArrays, required for multidimensional DataArrays.
sep : str or array-like, default: "".
Separator to use between strings.
It is broadcast in the same way as the other input strings.
If array-like, its dimensions will be placed at the end of the output array dimensions.
Returns
-------
joined : same type as values
Examples
--------
Create an array
>>> values = xr.DataArray(
... [["a", "bab", "abc"], ["abcd", "", "abcdef"]],
... dims=["X", "Y"],
... )
Determine the separator
>>> seps = xr.DataArray(
... ["-", "_"],
... dims=["ZZ"],
... )
Join the strings along a given dimension
>>> values.str.join(dim="Y", sep=seps)
<xarray.DataArray (X: 2, ZZ: 2)> Size: 192B
array([['a-bab-abc', 'a_bab_abc'],
['abcd--abcdef', 'abcd__abcdef']], dtype='<U12')
Dimensions without coordinates: X, ZZ
See Also
--------
pandas.Series.str.join
str.join
"""
if self._obj.ndim > 1 and dim is None:
raise ValueError("Dimension must be specified for multidimensional arrays.")
if self._obj.ndim > 1:
# Move the target dimension to the start and split along it
dimshifted = list(self._obj.transpose(dim, ...))
elif self._obj.ndim == 1:
dimshifted = list(self._obj)
else:
dimshifted = [self._obj]
start, *others = dimshifted
# concatenate the resulting arrays
return start.str.cat(*others, sep=sep)
| join |
xarray | 52 | xarray/plot/utils.py | def legend_elements(
self, prop="colors", num="auto", fmt=None, func=lambda x: x, **kwargs
):
"""
Create legend handles and labels for a PathCollection.
Each legend handle is a `.Line2D` representing the Path that was drawn,
and each label is a string describing what each Path represents.
This is useful for obtaining a legend for a `~.Axes.scatter` plot;
e.g.::
scatter = plt.scatter([1, 2, 3], [4, 5, 6], c=[7, 2, 3])
plt.legend(*scatter.legend_elements())
creates three legend elements, one for each color with the numerical
values passed to *c* as the labels.
Also see the :ref:`automatedlegendcreation` example.
Parameters
----------
prop : {"colors", "sizes"}, default: "colors"
If "colors", the legend handles will show the different colors of
the collection. If "sizes", the legend will show the different
sizes. To set both, use *kwargs* to directly edit the `.Line2D`
properties.
num : int, None, "auto" (default), array-like, or `~.ticker.Locator`
Target number of elements to create.
If None, use all unique elements of the mappable array. If an
integer, target to use *num* elements in the normed range.
If *"auto"*, try to determine which option better suits the nature
of the data.
The number of created elements may slightly deviate from *num* due
to a `~.ticker.Locator` being used to find useful locations.
If a list or array, use exactly those elements for the legend.
Finally, a `~.ticker.Locator` can be provided.
fmt : str, `~matplotlib.ticker.Formatter`, or None (default)
The format or formatter to use for the labels. If a string, it must be
a valid input for a `~.StrMethodFormatter`. If None (the default),
use a `~.ScalarFormatter`.
func : function, default: ``lambda x: x``
Function to calculate the labels. Often the size (or color)
argument to `~.Axes.scatter` will have been pre-processed by the
user using a function ``s = f(x)`` to make the markers visible;
e.g. ``size = np.log10(x)``. Providing the inverse of this
function here allows that pre-processing to be inverted, so that
the legend labels have the correct values; e.g. ``func = lambda
x: 10**x``.
**kwargs
Allowed keyword arguments are *color* and *size*. E.g. it may be
useful to set the color of the markers if *prop="sizes"* is used;
similarly to set the size of the markers if *prop="colors"* is
used. Any further parameters are passed onto the `.Line2D`
instance. This may be useful to e.g. specify a different
*markeredgecolor* or *alpha* for the legend handles.
Returns
-------
handles : list of `.Line2D`
Visual representation of each element of the legend.
labels : list of str
The string labels for elements of the legend.
"""
| /usr/src/app/target_test_cases/failed_tests_legend_elements.txt | def legend_elements(
self, prop="colors", num="auto", fmt=None, func=lambda x: x, **kwargs
):
"""
Create legend handles and labels for a PathCollection.
Each legend handle is a `.Line2D` representing the Path that was drawn,
and each label is a string describing what each Path represents.
This is useful for obtaining a legend for a `~.Axes.scatter` plot;
e.g.::
scatter = plt.scatter([1, 2, 3], [4, 5, 6], c=[7, 2, 3])
plt.legend(*scatter.legend_elements())
creates three legend elements, one for each color with the numerical
values passed to *c* as the labels.
Also see the :ref:`automatedlegendcreation` example.
Parameters
----------
prop : {"colors", "sizes"}, default: "colors"
If "colors", the legend handles will show the different colors of
the collection. If "sizes", the legend will show the different
sizes. To set both, use *kwargs* to directly edit the `.Line2D`
properties.
num : int, None, "auto" (default), array-like, or `~.ticker.Locator`
Target number of elements to create.
If None, use all unique elements of the mappable array. If an
integer, target to use *num* elements in the normed range.
If *"auto"*, try to determine which option better suits the nature
of the data.
The number of created elements may slightly deviate from *num* due
to a `~.ticker.Locator` being used to find useful locations.
If a list or array, use exactly those elements for the legend.
Finally, a `~.ticker.Locator` can be provided.
fmt : str, `~matplotlib.ticker.Formatter`, or None (default)
The format or formatter to use for the labels. If a string, it must be
a valid input for a `~.StrMethodFormatter`. If None (the default),
use a `~.ScalarFormatter`.
func : function, default: ``lambda x: x``
Function to calculate the labels. Often the size (or color)
argument to `~.Axes.scatter` will have been pre-processed by the
user using a function ``s = f(x)`` to make the markers visible;
e.g. ``size = np.log10(x)``. Providing the inverse of this
function here allows that pre-processing to be inverted, so that
the legend labels have the correct values; e.g. ``func = lambda
x: 10**x``.
**kwargs
Allowed keyword arguments are *color* and *size*. E.g. it may be
useful to set the color of the markers if *prop="sizes"* is used;
similarly to set the size of the markers if *prop="colors"* is
used. Any further parameters are passed onto the `.Line2D`
instance. This may be useful to e.g. specify a different
*markeredgecolor* or *alpha* for the legend handles.
Returns
-------
handles : list of `.Line2D`
Visual representation of each element of the legend.
labels : list of str
The string labels for elements of the legend.
"""
import warnings
import matplotlib as mpl
mlines = mpl.lines
handles = []
labels = []
if prop == "colors":
arr = self.get_array()
if arr is None:
warnings.warn(
"Collection without array used. Make sure to "
"specify the values to be colormapped via the "
"`c` argument.",
stacklevel=2,
)
return handles, labels
_size = kwargs.pop("size", mpl.rcParams["lines.markersize"])
def _get_color_and_size(value):
return self.cmap(self.norm(value)), _size
elif prop == "sizes":
if isinstance(self, mpl.collections.LineCollection):
arr = self.get_linewidths()
else:
arr = self.get_sizes()
_color = kwargs.pop("color", "k")
def _get_color_and_size(value):
return _color, np.sqrt(value)
else:
raise ValueError(
"Valid values for `prop` are 'colors' or "
f"'sizes'. You supplied '{prop}' instead."
)
# Get the unique values and their labels:
values = np.unique(arr)
label_values = np.asarray(func(values))
label_values_are_numeric = np.issubdtype(label_values.dtype, np.number)
# Handle the label format:
if fmt is None and label_values_are_numeric:
fmt = mpl.ticker.ScalarFormatter(useOffset=False, useMathText=True)
elif fmt is None and not label_values_are_numeric:
fmt = mpl.ticker.StrMethodFormatter("{x}")
elif isinstance(fmt, str):
fmt = mpl.ticker.StrMethodFormatter(fmt)
fmt.create_dummy_axis()
if num == "auto":
num = 9
if len(values) <= num:
num = None
if label_values_are_numeric:
label_values_min = label_values.min()
label_values_max = label_values.max()
fmt.axis.set_view_interval(label_values_min, label_values_max)
fmt.axis.set_data_interval(label_values_min, label_values_max)
if num is not None:
# Labels are numerical but larger than the target
# number of elements, reduce to target using matplotlibs
# ticker classes:
if isinstance(num, mpl.ticker.Locator):
loc = num
elif np.iterable(num):
loc = mpl.ticker.FixedLocator(num)
else:
num = int(num)
loc = mpl.ticker.MaxNLocator(
nbins=num, min_n_ticks=num - 1, steps=[1, 2, 2.5, 3, 5, 6, 8, 10]
)
# Get nicely spaced label_values:
label_values = loc.tick_values(label_values_min, label_values_max)
# Remove extrapolated label_values:
cond = (label_values >= label_values_min) & (
label_values <= label_values_max
)
label_values = label_values[cond]
# Get the corresponding values by creating a linear interpolant
# with small step size:
values_interp = np.linspace(values.min(), values.max(), 256)
label_values_interp = func(values_interp)
ix = np.argsort(label_values_interp)
values = np.interp(label_values, label_values_interp[ix], values_interp[ix])
elif num is not None and not label_values_are_numeric:
# Labels are not numerical so modifying label_values is not
# possible, instead filter the array with nicely distributed
# indexes:
if type(num) == int: # noqa: E721
loc = mpl.ticker.LinearLocator(num)
else:
raise ValueError("`num` only supports integers for non-numeric labels.")
ind = loc.tick_values(0, len(label_values) - 1).astype(int)
label_values = label_values[ind]
values = values[ind]
# Some formatters requires set_locs:
if hasattr(fmt, "set_locs"):
fmt.set_locs(label_values)
# Default settings for handles, add or override with kwargs:
kw = dict(markeredgewidth=self.get_linewidths()[0], alpha=self.get_alpha())
kw.update(kwargs)
for val, lab in zip(values, label_values, strict=True):
color, size = _get_color_and_size(val)
if isinstance(self, mpl.collections.PathCollection):
kw.update(linestyle="", marker=self.get_paths()[0], markersize=size)
elif isinstance(self, mpl.collections.LineCollection):
kw.update(linestyle=self.get_linestyle()[0], linewidth=size)
h = mlines.Line2D([0], [0], color=color, **kw)
handles.append(h)
labels.append(fmt(lab))
return handles, labels
| legend_elements |
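This helper is essentially a vendored copy of matplotlib's ``PathCollection.legend_elements``, extended to also read line widths from a ``LineCollection``. A minimal sketch of the equivalent public matplotlib call:
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
pc = ax.scatter([1, 2, 3, 4], [4, 5, 6, 7], c=[7, 2, 3, 9], s=[20, 80, 160, 320])
# One handle/label pair per colormapped value:
handles, labels = pc.legend_elements(prop="colors")
ax.legend(handles, labels, title="c value")
# Or one per marker size, undoing a size transform such as s = 20 * x:
handles, labels = pc.legend_elements(prop="sizes", num=3, func=lambda s: s / 20)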
xarray | 53 | xarray/core/datatree.py | def map( # type: ignore[override]
self,
func: Callable,
keep_attrs: bool | None = None,
args: Iterable[Any] = (),
**kwargs: Any,
) -> Dataset:
"""Apply a function to each data variable in this dataset
Parameters
----------
func : callable
Function which can be called in the form `func(x, *args, **kwargs)`
to transform each DataArray `x` in this dataset into another
DataArray.
keep_attrs : bool | None, optional
If True, both the dataset's and variables' attributes (`attrs`) will be
copied from the original objects to the new ones. If False, the new dataset
and variables will be returned without copying the attributes.
args : iterable, optional
Positional arguments passed on to `func`.
**kwargs : Any
Keyword arguments passed on to `func`.
Returns
-------
applied : Dataset
Resulting dataset from applying ``func`` to each data variable.
Examples
--------
>>> da = xr.DataArray(np.random.randn(2, 3))
>>> ds = xr.Dataset({"foo": da, "bar": ("x", [-1, 2])})
>>> ds
<xarray.Dataset> Size: 64B
Dimensions: (dim_0: 2, dim_1: 3, x: 2)
Dimensions without coordinates: dim_0, dim_1, x
Data variables:
foo (dim_0, dim_1) float64 48B 1.764 0.4002 0.9787 2.241 1.868 -0.9773
bar (x) int64 16B -1 2
>>> ds.map(np.fabs)
<xarray.Dataset> Size: 64B
Dimensions: (dim_0: 2, dim_1: 3, x: 2)
Dimensions without coordinates: dim_0, dim_1, x
Data variables:
foo (dim_0, dim_1) float64 48B 1.764 0.4002 0.9787 2.241 1.868 0.9773
bar (x) float64 16B 1.0 2.0
"""
| /usr/src/app/target_test_cases/failed_tests_map.txt | def map( # type: ignore[override]
self,
func: Callable,
keep_attrs: bool | None = None,
args: Iterable[Any] = (),
**kwargs: Any,
) -> Dataset:
"""Apply a function to each data variable in this dataset
Parameters
----------
func : callable
Function which can be called in the form `func(x, *args, **kwargs)`
to transform each DataArray `x` in this dataset into another
DataArray.
keep_attrs : bool | None, optional
If True, both the dataset's and variables' attributes (`attrs`) will be
copied from the original objects to the new ones. If False, the new dataset
and variables will be returned without copying the attributes.
args : iterable, optional
Positional arguments passed on to `func`.
**kwargs : Any
Keyword arguments passed on to `func`.
Returns
-------
applied : Dataset
Resulting dataset from applying ``func`` to each data variable.
Examples
--------
>>> da = xr.DataArray(np.random.randn(2, 3))
>>> ds = xr.Dataset({"foo": da, "bar": ("x", [-1, 2])})
>>> ds
<xarray.Dataset> Size: 64B
Dimensions: (dim_0: 2, dim_1: 3, x: 2)
Dimensions without coordinates: dim_0, dim_1, x
Data variables:
foo (dim_0, dim_1) float64 48B 1.764 0.4002 0.9787 2.241 1.868 -0.9773
bar (x) int64 16B -1 2
>>> ds.map(np.fabs)
<xarray.Dataset> Size: 64B
Dimensions: (dim_0: 2, dim_1: 3, x: 2)
Dimensions without coordinates: dim_0, dim_1, x
Data variables:
foo (dim_0, dim_1) float64 48B 1.764 0.4002 0.9787 2.241 1.868 0.9773
bar (x) float64 16B 1.0 2.0
"""
# Copied from xarray.Dataset so as not to call type(self), which causes problems (see https://github.com/xarray-contrib/datatree/issues/188).
# TODO Refactor xarray upstream to avoid needing to overwrite this.
# TODO This copied version will drop all attrs - the keep_attrs stuff should be re-instated
variables = {
k: maybe_wrap_array(v, func(v, *args, **kwargs))
for k, v in self.data_vars.items()
}
# return type(self)(variables, attrs=attrs)
return Dataset(variables)
| map |
xarray | 54 | xarray/core/parallel.py | def map_blocks(
func: Callable[..., T_Xarray],
obj: DataArray | Dataset,
args: Sequence[Any] = (),
kwargs: Mapping[str, Any] | None = None,
template: DataArray | Dataset | None = None,
) -> T_Xarray:
"""Apply a function to each block of a DataArray or Dataset.
.. warning::
This function is experimental and its signature may change.
Parameters
----------
func : callable
User-provided function that accepts a DataArray or Dataset as its first
parameter ``obj``. The function will receive a subset or 'block' of ``obj`` (see below),
corresponding to one chunk along each chunked dimension. ``func`` will be
executed as ``func(subset_obj, *subset_args, **kwargs)``.
This function must return either a single DataArray or a single Dataset.
This function cannot add a new chunked dimension.
obj : DataArray, Dataset
Passed to the function as its first argument, one block at a time.
args : sequence
Passed to func after unpacking and subsetting any xarray objects by blocks.
xarray objects in args must be aligned with obj, otherwise an error is raised.
kwargs : mapping
Passed verbatim to func after unpacking. xarray objects, if any, will not be
subset to blocks. Passing dask collections in kwargs is not allowed.
template : DataArray or Dataset, optional
xarray object representing the final result after compute is called. If not provided,
the function will be first run on mocked-up data, that looks like ``obj`` but
has sizes 0, to determine properties of the returned object such as dtype,
variable names, attributes, new dimensions and new indexes (if any).
``template`` must be provided if the function changes the size of existing dimensions.
When provided, ``attrs`` on variables in `template` are copied over to the result. Any
``attrs`` set by ``func`` will be ignored.
Returns
-------
obj : same as obj
A single DataArray or Dataset with dask backend, reassembled from the outputs of the
function.
Notes
-----
This function is designed for when ``func`` needs to manipulate a whole xarray object
subset to each block. Each block is loaded into memory. In the more common case where
``func`` can work on numpy arrays, it is recommended to use ``apply_ufunc``.
If none of the variables in ``obj`` is backed by dask arrays, calling this function is
equivalent to calling ``func(obj, *args, **kwargs)``.
See Also
--------
dask.array.map_blocks, xarray.apply_ufunc, xarray.Dataset.map_blocks
xarray.DataArray.map_blocks
Examples
--------
Calculate an anomaly from climatology using ``.groupby()``. Using
``xr.map_blocks()`` allows for parallel operations with knowledge of ``xarray``,
its indices, and its methods like ``.groupby()``.
>>> def calculate_anomaly(da, groupby_type="time.month"):
... gb = da.groupby(groupby_type)
... clim = gb.mean(dim="time")
... return gb - clim
...
>>> time = xr.cftime_range("1990-01", "1992-01", freq="ME")
>>> month = xr.DataArray(time.month, coords={"time": time}, dims=["time"])
>>> np.random.seed(123)
>>> array = xr.DataArray(
... np.random.rand(len(time)),
... dims=["time"],
... coords={"time": time, "month": month},
... ).chunk()
>>> array.map_blocks(calculate_anomaly, template=array).compute()
<xarray.DataArray (time: 24)> Size: 192B
array([ 0.12894847, 0.11323072, -0.0855964 , -0.09334032, 0.26848862,
0.12382735, 0.22460641, 0.07650108, -0.07673453, -0.22865714,
-0.19063865, 0.0590131 , -0.12894847, -0.11323072, 0.0855964 ,
0.09334032, -0.26848862, -0.12382735, -0.22460641, -0.07650108,
0.07673453, 0.22865714, 0.19063865, -0.0590131 ])
Coordinates:
* time (time) object 192B 1990-01-31 00:00:00 ... 1991-12-31 00:00:00
month (time) int64 192B 1 2 3 4 5 6 7 8 9 10 ... 3 4 5 6 7 8 9 10 11 12
Note that one must explicitly use ``args=[]`` and ``kwargs={}`` to pass arguments
to the function being applied in ``xr.map_blocks()``:
>>> array.map_blocks(
... calculate_anomaly,
... kwargs={"groupby_type": "time.year"},
... template=array,
... ) # doctest: +ELLIPSIS
<xarray.DataArray (time: 24)> Size: 192B
dask.array<<this-array>-calculate_anomaly, shape=(24,), dtype=float64, chunksize=(24,), chunktype=numpy.ndarray>
Coordinates:
* time (time) object 192B 1990-01-31 00:00:00 ... 1991-12-31 00:00:00
month (time) int64 192B dask.array<chunksize=(24,), meta=np.ndarray>
"""
| /usr/src/app/target_test_cases/failed_tests_map_blocks.txt | def map_blocks(
func: Callable[..., T_Xarray],
obj: DataArray | Dataset,
args: Sequence[Any] = (),
kwargs: Mapping[str, Any] | None = None,
template: DataArray | Dataset | None = None,
) -> T_Xarray:
"""Apply a function to each block of a DataArray or Dataset.
.. warning::
This function is experimental and its signature may change.
Parameters
----------
func : callable
User-provided function that accepts a DataArray or Dataset as its first
parameter ``obj``. The function will receive a subset or 'block' of ``obj`` (see below),
corresponding to one chunk along each chunked dimension. ``func`` will be
executed as ``func(subset_obj, *subset_args, **kwargs)``.
This function must return either a single DataArray or a single Dataset.
This function cannot add a new chunked dimension.
obj : DataArray, Dataset
Passed to the function as its first argument, one block at a time.
args : sequence
Passed to func after unpacking and subsetting any xarray objects by blocks.
xarray objects in args must be aligned with obj, otherwise an error is raised.
kwargs : mapping
Passed verbatim to func after unpacking. xarray objects, if any, will not be
subset to blocks. Passing dask collections in kwargs is not allowed.
template : DataArray or Dataset, optional
xarray object representing the final result after compute is called. If not provided,
the function will be first run on mocked-up data, that looks like ``obj`` but
has sizes 0, to determine properties of the returned object such as dtype,
variable names, attributes, new dimensions and new indexes (if any).
``template`` must be provided if the function changes the size of existing dimensions.
When provided, ``attrs`` on variables in `template` are copied over to the result. Any
``attrs`` set by ``func`` will be ignored.
Returns
-------
obj : same as obj
A single DataArray or Dataset with dask backend, reassembled from the outputs of the
function.
Notes
-----
This function is designed for when ``func`` needs to manipulate a whole xarray object
subset to each block. Each block is loaded into memory. In the more common case where
``func`` can work on numpy arrays, it is recommended to use ``apply_ufunc``.
If none of the variables in ``obj`` is backed by dask arrays, calling this function is
equivalent to calling ``func(obj, *args, **kwargs)``.
See Also
--------
dask.array.map_blocks, xarray.apply_ufunc, xarray.Dataset.map_blocks
xarray.DataArray.map_blocks
Examples
--------
Calculate an anomaly from climatology using ``.groupby()``. Using
``xr.map_blocks()`` allows for parallel operations with knowledge of ``xarray``,
its indices, and its methods like ``.groupby()``.
>>> def calculate_anomaly(da, groupby_type="time.month"):
... gb = da.groupby(groupby_type)
... clim = gb.mean(dim="time")
... return gb - clim
...
>>> time = xr.cftime_range("1990-01", "1992-01", freq="ME")
>>> month = xr.DataArray(time.month, coords={"time": time}, dims=["time"])
>>> np.random.seed(123)
>>> array = xr.DataArray(
... np.random.rand(len(time)),
... dims=["time"],
... coords={"time": time, "month": month},
... ).chunk()
>>> array.map_blocks(calculate_anomaly, template=array).compute()
<xarray.DataArray (time: 24)> Size: 192B
array([ 0.12894847, 0.11323072, -0.0855964 , -0.09334032, 0.26848862,
0.12382735, 0.22460641, 0.07650108, -0.07673453, -0.22865714,
-0.19063865, 0.0590131 , -0.12894847, -0.11323072, 0.0855964 ,
0.09334032, -0.26848862, -0.12382735, -0.22460641, -0.07650108,
0.07673453, 0.22865714, 0.19063865, -0.0590131 ])
Coordinates:
* time (time) object 192B 1990-01-31 00:00:00 ... 1991-12-31 00:00:00
month (time) int64 192B 1 2 3 4 5 6 7 8 9 10 ... 3 4 5 6 7 8 9 10 11 12
Note that one must explicitly use ``args=[]`` and ``kwargs={}`` to pass arguments
to the function being applied in ``xr.map_blocks()``:
>>> array.map_blocks(
... calculate_anomaly,
... kwargs={"groupby_type": "time.year"},
... template=array,
... ) # doctest: +ELLIPSIS
<xarray.DataArray (time: 24)> Size: 192B
dask.array<<this-array>-calculate_anomaly, shape=(24,), dtype=float64, chunksize=(24,), chunktype=numpy.ndarray>
Coordinates:
* time (time) object 192B 1990-01-31 00:00:00 ... 1991-12-31 00:00:00
month (time) int64 192B dask.array<chunksize=(24,), meta=np.ndarray>
"""
def _wrapper(
func: Callable,
args: list,
kwargs: dict,
arg_is_array: Iterable[bool],
expected: ExpectedDict,
):
"""
Wrapper function that receives datasets in args; converts to dataarrays when necessary;
passes these to the user function `func` and checks returned objects for expected shapes/sizes/etc.
"""
converted_args = [
dataset_to_dataarray(arg) if is_array else arg
for is_array, arg in zip(arg_is_array, args, strict=True)
]
result = func(*converted_args, **kwargs)
merged_coordinates = merge(
[arg.coords for arg in args if isinstance(arg, Dataset | DataArray)]
).coords
# check all dims are present
missing_dimensions = set(expected["shapes"]) - set(result.sizes)
if missing_dimensions:
raise ValueError(
f"Dimensions {missing_dimensions} missing on returned object."
)
# check that index lengths and values are as expected
for name, index in result._indexes.items():
if name in expected["shapes"]:
if result.sizes[name] != expected["shapes"][name]:
raise ValueError(
f"Received dimension {name!r} of length {result.sizes[name]}. "
f"Expected length {expected['shapes'][name]}."
)
# ChainMap wants MutableMapping, but xindexes is Mapping
merged_indexes = collections.ChainMap(
expected["indexes"], merged_coordinates.xindexes # type: ignore[arg-type]
)
expected_index = merged_indexes.get(name, None)
if expected_index is not None and not index.equals(expected_index):
raise ValueError(
f"Expected index {name!r} to be {expected_index!r}. Received {index!r} instead."
)
# check that all expected variables were returned
check_result_variables(result, expected, "coords")
if isinstance(result, Dataset):
check_result_variables(result, expected, "data_vars")
return make_dict(result)
if template is not None and not isinstance(template, DataArray | Dataset):
raise TypeError(
f"template must be a DataArray or Dataset. Received {type(template).__name__} instead."
)
if not isinstance(args, Sequence):
raise TypeError("args must be a sequence (for example, a list or tuple).")
if kwargs is None:
kwargs = {}
elif not isinstance(kwargs, Mapping):
raise TypeError("kwargs must be a mapping (for example, a dict)")
for value in kwargs.values():
if is_dask_collection(value):
raise TypeError(
"Cannot pass dask collections in kwargs yet. Please compute or "
"load values before passing to map_blocks."
)
if not is_dask_collection(obj):
return func(obj, *args, **kwargs)
try:
import dask
import dask.array
from dask.highlevelgraph import HighLevelGraph
except ImportError:
pass
all_args = [obj] + list(args)
is_xarray = [isinstance(arg, Dataset | DataArray) for arg in all_args]
is_array = [isinstance(arg, DataArray) for arg in all_args]
# there should be a better way to group this. partition?
xarray_indices, xarray_objs = unzip(
(index, arg) for index, arg in enumerate(all_args) if is_xarray[index]
)
others = [
(index, arg) for index, arg in enumerate(all_args) if not is_xarray[index]
]
# all xarray objects must be aligned. This is consistent with apply_ufunc.
aligned = align(*xarray_objs, join="exact")
xarray_objs = tuple(
dataarray_to_dataset(arg) if isinstance(arg, DataArray) else arg
for arg in aligned
)
# rechunk any numpy variables appropriately
xarray_objs = tuple(arg.chunk(arg.chunksizes) for arg in xarray_objs)
merged_coordinates = merge([arg.coords for arg in aligned]).coords
_, npargs = unzip(
sorted(
list(zip(xarray_indices, xarray_objs, strict=True)) + others,
key=lambda x: x[0],
)
)
# check that chunk sizes are compatible
input_chunks = dict(npargs[0].chunks)
for arg in xarray_objs[1:]:
assert_chunks_compatible(npargs[0], arg)
input_chunks.update(arg.chunks)
coordinates: Coordinates
if template is None:
# infer template by providing zero-shaped arrays
template = infer_template(func, aligned[0], *args, **kwargs)
template_coords = set(template.coords)
preserved_coord_vars = template_coords & set(merged_coordinates)
new_coord_vars = template_coords - set(merged_coordinates)
preserved_coords = merged_coordinates.to_dataset()[preserved_coord_vars]
# preserved_coords contains all coordinates variables that share a dimension
# with any index variable in preserved_indexes
# Drop any unneeded vars in a second pass, this is required for e.g.
# if the mapped function were to drop a non-dimension coordinate variable.
preserved_coords = preserved_coords.drop_vars(
tuple(k for k in preserved_coords.variables if k not in template_coords)
)
coordinates = merge(
(preserved_coords, template.coords.to_dataset()[new_coord_vars])
).coords
output_chunks: Mapping[Hashable, tuple[int, ...]] = {
dim: input_chunks[dim] for dim in template.dims if dim in input_chunks
}
else:
# template xarray object has been provided with proper sizes and chunk shapes
coordinates = template.coords
output_chunks = template.chunksizes
if not output_chunks:
raise ValueError(
"Provided template has no dask arrays. "
" Please construct a template with appropriately chunked dask arrays."
)
new_indexes = set(template.xindexes) - set(merged_coordinates)
modified_indexes = set(
name
for name, xindex in coordinates.xindexes.items()
if not xindex.equals(merged_coordinates.xindexes.get(name, None))
)
for dim in output_chunks:
if dim in input_chunks and len(input_chunks[dim]) != len(output_chunks[dim]):
raise ValueError(
"map_blocks requires that one block of the input maps to one block of output. "
f"Expected number of output chunks along dimension {dim!r} to be {len(input_chunks[dim])}. "
f"Received {len(output_chunks[dim])} instead. Please provide template if not provided, or "
"fix the provided template."
)
if isinstance(template, DataArray):
result_is_array = True
template_name = template.name
template = template._to_temp_dataset()
elif isinstance(template, Dataset):
result_is_array = False
else:
raise TypeError(
f"func output must be DataArray or Dataset; got {type(template)}"
)
# We're building a new HighLevelGraph hlg. We'll have one new layer
# for each variable in the dataset, which is the result of the
# func applied to the values.
graph: dict[Any, Any] = {}
new_layers: collections.defaultdict[str, dict[Any, Any]] = collections.defaultdict(
dict
)
gname = f"{dask.utils.funcname(func)}-{dask.base.tokenize(npargs[0], args, kwargs)}"
# map dims to list of chunk indexes
ichunk = {dim: range(len(chunks_v)) for dim, chunks_v in input_chunks.items()}
# mapping from chunk index to slice bounds
input_chunk_bounds = {
dim: np.cumsum((0,) + chunks_v) for dim, chunks_v in input_chunks.items()
}
output_chunk_bounds = {
dim: np.cumsum((0,) + chunks_v) for dim, chunks_v in output_chunks.items()
}
computed_variables = set(template.variables) - set(coordinates.indexes)
# iterate over all possible chunk combinations
for chunk_tuple in itertools.product(*ichunk.values()):
# mapping from dimension name to chunk index
chunk_index = dict(zip(ichunk.keys(), chunk_tuple, strict=True))
blocked_args = [
(
subset_dataset_to_block(
graph, gname, arg, input_chunk_bounds, chunk_index
)
if isxr
else arg
)
for isxr, arg in zip(is_xarray, npargs, strict=True)
]
# raise nice error messages in _wrapper
expected: ExpectedDict = {
# input chunk 0 along a dimension maps to output chunk 0 along the same dimension
# even if length of dimension is changed by the applied function
"shapes": {
k: output_chunks[k][v]
for k, v in chunk_index.items()
if k in output_chunks
},
"data_vars": set(template.data_vars.keys()),
"coords": set(template.coords.keys()),
# only include new or modified indexes to minimize duplication of data, and graph size.
"indexes": {
dim: coordinates.xindexes[dim][
_get_chunk_slicer(dim, chunk_index, output_chunk_bounds)
]
for dim in (new_indexes | modified_indexes)
},
}
from_wrapper = (gname,) + chunk_tuple
graph[from_wrapper] = (_wrapper, func, blocked_args, kwargs, is_array, expected)
# mapping from variable name to dask graph key
var_key_map: dict[Hashable, str] = {}
for name in computed_variables:
variable = template.variables[name]
gname_l = f"{name}-{gname}"
var_key_map[name] = gname_l
# unchunked dimensions in the input have one chunk in the result
# output can have new dimensions with exactly one chunk
key: tuple[Any, ...] = (gname_l,) + tuple(
chunk_index[dim] if dim in chunk_index else 0 for dim in variable.dims
)
# We're adding multiple new layers to the graph:
# The first new layer is the result of the computation on
# the array.
# Then we add one layer per variable, which extracts the
# result for that variable, and depends on just the first new
# layer.
new_layers[gname_l][key] = (operator.getitem, from_wrapper, name)
hlg = HighLevelGraph.from_collections(
gname,
graph,
dependencies=[arg for arg in npargs if dask.is_dask_collection(arg)],
)
# This adds in the getitems for each variable in the dataset.
hlg = HighLevelGraph(
{**hlg.layers, **new_layers},
dependencies={
**hlg.dependencies,
**{name: {gname} for name in new_layers.keys()},
},
)
result = Dataset(coords=coordinates, attrs=template.attrs)
for index in result._indexes:
result[index].attrs = template[index].attrs
result[index].encoding = template[index].encoding
for name, gname_l in var_key_map.items():
dims = template[name].dims
var_chunks = []
for dim in dims:
if dim in output_chunks:
var_chunks.append(output_chunks[dim])
elif dim in result._indexes:
var_chunks.append((result.sizes[dim],))
elif dim in template.dims:
# new unindexed dimension
var_chunks.append((template.sizes[dim],))
data = dask.array.Array(
hlg, name=gname_l, chunks=var_chunks, dtype=template[name].dtype
)
result[name] = (dims, data, template[name].attrs)
result[name].encoding = template[name].encoding
result = result.set_coords(template._coord_names)
if result_is_array:
da = dataset_to_dataarray(result)
da.name = template_name
return da # type: ignore[return-value]
return result # type: ignore[return-value]
| map_blocks |
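A minimal usage sketch of the map_blocks machinery implemented above, assuming xarray and dask are installed; the array values and the helper name demean_y are invented for illustration.

import numpy as np
import xarray as xr

# a small chunked DataArray; one chunk per "x" label
da = xr.DataArray(
    np.arange(12.0).reshape(3, 4),
    dims=("x", "y"),
    coords={"x": [10, 20, 30]},
).chunk({"x": 1})

def demean_y(block: xr.DataArray) -> xr.DataArray:
    # runs once per block; the output keeps the block's shape, so the
    # (chunked) input itself can serve as the template
    return block - block.mean("y")

result = xr.map_blocks(demean_y, da, template=da)
print(result.compute())

Because the output shape matches the input, passing template=da exercises the template-validation branch above; omitting template would instead go through infer_template on zero-shaped inputs.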
xarray | 55 | xarray/core/datatree.py | def match(self, pattern: str) -> DataTree:
"""
Return nodes with paths matching pattern.
Uses unix glob-like syntax for pattern-matching.
Parameters
----------
pattern: str
A pattern to match each node path against.
Returns
-------
DataTree
See Also
--------
filter
pipe
map_over_subtree
Examples
--------
>>> dt = DataTree.from_dict(
... {
... "/a/A": None,
... "/a/B": None,
... "/b/A": None,
... "/b/B": None,
... }
... )
>>> dt.match("*/B")
<xarray.DataTree>
Group: /
├── Group: /a
│ └── Group: /a/B
└── Group: /b
└── Group: /b/B
"""
| /usr/src/app/target_test_cases/failed_tests_match.txt | def match(self, pattern: str) -> DataTree:
"""
Return nodes with paths matching pattern.
Uses unix glob-like syntax for pattern-matching.
Parameters
----------
pattern: str
A pattern to match each node path against.
Returns
-------
DataTree
See Also
--------
filter
pipe
map_over_subtree
Examples
--------
>>> dt = DataTree.from_dict(
... {
... "/a/A": None,
... "/a/B": None,
... "/b/A": None,
... "/b/B": None,
... }
... )
>>> dt.match("*/B")
<xarray.DataTree>
Group: /
├── Group: /a
│ └── Group: /a/B
└── Group: /b
└── Group: /b/B
"""
matching_nodes = {
node.path: node.dataset
for node in self.subtree
if NodePath(node.path).match(pattern)
}
return DataTree.from_dict(matching_nodes, name=self.root.name)
| match |
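The pattern matching above delegates to NodePath, a PurePosixPath-style helper; a short sketch of the same glob semantics using plain pathlib and made-up paths:

from pathlib import PurePosixPath

paths = ["/a/A", "/a/B", "/b/A", "/b/B"]
# relative patterns match from the right: any ".../<parent>/B" path qualifies
print([p for p in paths if PurePosixPath(p).match("*/B")])  # ['/a/B', '/b/B']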
xarray | 56 | xarray/core/_aggregations.py | def max(
self,
dim: Dims = None,
*,
skipna: bool | None = None,
keep_attrs: bool | None = None,
**kwargs: Any,
) -> Self:
"""
Reduce this Dataset's data by applying ``max`` along some dimension(s).
Parameters
----------
dim : str, Iterable of Hashable, "..." or None, default: None
Name of dimension[s] along which to apply ``max``, e.g. ``dim="x"``
or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions.
skipna : bool or None, optional
If True, skip missing values (as marked by NaN). By default, only
skips missing values for float dtypes; other dtypes either do not
have a sentinel missing value (int) or ``skipna=True`` has not been
implemented (object, datetime64 or timedelta64).
keep_attrs : bool or None, optional
If True, ``attrs`` will be copied from the original
object to the new one. If False, the new object will be
returned without attributes.
**kwargs : Any
Additional keyword arguments passed on to the appropriate array
function for calculating ``max`` on this object's data.
These could include dask-specific kwargs like ``split_every``.
Returns
-------
reduced : Dataset
New Dataset with ``max`` applied to its data and the
indicated dimension(s) removed
See Also
--------
numpy.max
dask.array.max
DataArray.max
:ref:`agg`
User guide on reduction or aggregation operations.
Examples
--------
>>> da = xr.DataArray(
... np.array([1, 2, 3, 0, 2, np.nan]),
... dims="time",
... coords=dict(
... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)),
... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
... ),
... )
>>> ds = xr.Dataset(dict(da=da))
>>> ds
<xarray.Dataset> Size: 120B
Dimensions: (time: 6)
Coordinates:
* time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30
labels (time) <U1 24B 'a' 'b' 'c' 'c' 'b' 'a'
Data variables:
da (time) float64 48B 1.0 2.0 3.0 0.0 2.0 nan
>>> ds.max()
<xarray.Dataset> Size: 8B
Dimensions: ()
Data variables:
da float64 8B 3.0
Use ``skipna`` to control whether NaNs are ignored.
>>> ds.max(skipna=False)
<xarray.Dataset> Size: 8B
Dimensions: ()
Data variables:
da float64 8B nan
"""
| /usr/src/app/target_test_cases/failed_tests_max.txt | def max(
self,
dim: Dims = None,
*,
skipna: bool | None = None,
keep_attrs: bool | None = None,
**kwargs: Any,
) -> Self:
"""
Reduce this Dataset's data by applying ``max`` along some dimension(s).
Parameters
----------
dim : str, Iterable of Hashable, "..." or None, default: None
Name of dimension[s] along which to apply ``max``, e.g. ``dim="x"``
or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions.
skipna : bool or None, optional
If True, skip missing values (as marked by NaN). By default, only
skips missing values for float dtypes; other dtypes either do not
have a sentinel missing value (int) or ``skipna=True`` has not been
implemented (object, datetime64 or timedelta64).
keep_attrs : bool or None, optional
If True, ``attrs`` will be copied from the original
object to the new one. If False, the new object will be
returned without attributes.
**kwargs : Any
Additional keyword arguments passed on to the appropriate array
function for calculating ``max`` on this object's data.
These could include dask-specific kwargs like ``split_every``.
Returns
-------
reduced : Dataset
New Dataset with ``max`` applied to its data and the
indicated dimension(s) removed
See Also
--------
numpy.max
dask.array.max
DataArray.max
:ref:`agg`
User guide on reduction or aggregation operations.
Examples
--------
>>> da = xr.DataArray(
... np.array([1, 2, 3, 0, 2, np.nan]),
... dims="time",
... coords=dict(
... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)),
... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
... ),
... )
>>> ds = xr.Dataset(dict(da=da))
>>> ds
<xarray.Dataset> Size: 120B
Dimensions: (time: 6)
Coordinates:
* time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30
labels (time) <U1 24B 'a' 'b' 'c' 'c' 'b' 'a'
Data variables:
da (time) float64 48B 1.0 2.0 3.0 0.0 2.0 nan
>>> ds.max()
<xarray.Dataset> Size: 8B
Dimensions: ()
Data variables:
da float64 8B 3.0
Use ``skipna`` to control whether NaNs are ignored.
>>> ds.max(skipna=False)
<xarray.Dataset> Size: 8B
Dimensions: ()
Data variables:
da float64 8B nan
"""
return self.reduce(
duck_array_ops.max,
dim=dim,
skipna=skipna,
numeric_only=False,
keep_attrs=keep_attrs,
**kwargs,
)
| max |
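A complementary sketch to the doctest above, reducing along a named dimension instead of over all dimensions (values are invented):

import numpy as np
import xarray as xr

ds = xr.Dataset({"da": (("x", "y"), [[1.0, np.nan], [3.0, 4.0]])})
print(ds.max(dim="y"))                # NaN skipped: da == [1.0, 4.0]
print(ds.max(dim="y", skipna=False))  # NaN propagated: da == [nan, 4.0]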
xarray | 57 | xarray/core/_aggregations.py | def mean(
self,
dim: Dims = None,
*,
skipna: bool | None = None,
keep_attrs: bool | None = None,
**kwargs: Any,
) -> Self:
"""
Reduce this Dataset's data by applying ``mean`` along some dimension(s).
Parameters
----------
dim : str, Iterable of Hashable, "..." or None, default: None
Name of dimension[s] along which to apply ``mean``, e.g. ``dim="x"``
or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions.
skipna : bool or None, optional
If True, skip missing values (as marked by NaN). By default, only
skips missing values for float dtypes; other dtypes either do not
have a sentinel missing value (int) or ``skipna=True`` has not been
implemented (object, datetime64 or timedelta64).
keep_attrs : bool or None, optional
If True, ``attrs`` will be copied from the original
object to the new one. If False, the new object will be
returned without attributes.
**kwargs : Any
Additional keyword arguments passed on to the appropriate array
function for calculating ``mean`` on this object's data.
These could include dask-specific kwargs like ``split_every``.
Returns
-------
reduced : Dataset
New Dataset with ``mean`` applied to its data and the
indicated dimension(s) removed
See Also
--------
numpy.mean
dask.array.mean
DataArray.mean
:ref:`agg`
User guide on reduction or aggregation operations.
Notes
-----
Non-numeric variables will be removed prior to reducing.
Examples
--------
>>> da = xr.DataArray(
... np.array([1, 2, 3, 0, 2, np.nan]),
... dims="time",
... coords=dict(
... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)),
... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
... ),
... )
>>> ds = xr.Dataset(dict(da=da))
>>> ds
<xarray.Dataset> Size: 120B
Dimensions: (time: 6)
Coordinates:
* time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30
labels (time) <U1 24B 'a' 'b' 'c' 'c' 'b' 'a'
Data variables:
da (time) float64 48B 1.0 2.0 3.0 0.0 2.0 nan
>>> ds.mean()
<xarray.Dataset> Size: 8B
Dimensions: ()
Data variables:
da float64 8B 1.6
Use ``skipna`` to control whether NaNs are ignored.
>>> ds.mean(skipna=False)
<xarray.Dataset> Size: 8B
Dimensions: ()
Data variables:
da float64 8B nan
"""
| /usr/src/app/target_test_cases/failed_tests_mean.txt | def mean(
self,
dim: Dims = None,
*,
skipna: bool | None = None,
keep_attrs: bool | None = None,
**kwargs: Any,
) -> Self:
"""
Reduce this Dataset's data by applying ``mean`` along some dimension(s).
Parameters
----------
dim : str, Iterable of Hashable, "..." or None, default: None
Name of dimension[s] along which to apply ``mean``. For e.g. ``dim="x"``
or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions.
skipna : bool or None, optional
If True, skip missing values (as marked by NaN). By default, only
skips missing values for float dtypes; other dtypes either do not
have a sentinel missing value (int) or ``skipna=True`` has not been
implemented (object, datetime64 or timedelta64).
keep_attrs : bool or None, optional
If True, ``attrs`` will be copied from the original
object to the new one. If False, the new object will be
returned without attributes.
**kwargs : Any
Additional keyword arguments passed on to the appropriate array
function for calculating ``mean`` on this object's data.
These could include dask-specific kwargs like ``split_every``.
Returns
-------
reduced : Dataset
New Dataset with ``mean`` applied to its data and the
indicated dimension(s) removed
See Also
--------
numpy.mean
dask.array.mean
DataArray.mean
:ref:`agg`
User guide on reduction or aggregation operations.
Notes
-----
Non-numeric variables will be removed prior to reducing.
Examples
--------
>>> da = xr.DataArray(
... np.array([1, 2, 3, 0, 2, np.nan]),
... dims="time",
... coords=dict(
... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)),
... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
... ),
... )
>>> ds = xr.Dataset(dict(da=da))
>>> ds
<xarray.Dataset> Size: 120B
Dimensions: (time: 6)
Coordinates:
* time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30
labels (time) <U1 24B 'a' 'b' 'c' 'c' 'b' 'a'
Data variables:
da (time) float64 48B 1.0 2.0 3.0 0.0 2.0 nan
>>> ds.mean()
<xarray.Dataset> Size: 8B
Dimensions: ()
Data variables:
da float64 8B 1.6
Use ``skipna`` to control whether NaNs are ignored.
>>> ds.mean(skipna=False)
<xarray.Dataset> Size: 8B
Dimensions: ()
Data variables:
da float64 8B nan
"""
return self.reduce(
duck_array_ops.mean,
dim=dim,
skipna=skipna,
numeric_only=True,
keep_attrs=keep_attrs,
**kwargs,
)
| mean |
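Unlike max/min, the reduction above passes numeric_only=True, which is why the Notes section says non-numeric variables are removed; a small sketch with invented data:

import numpy as np
import xarray as xr

ds = xr.Dataset(
    {
        "temp": ("t", [1.0, 2.0, np.nan]),
        "label": ("t", ["a", "b", "c"]),
    }
)
print(ds.mean())  # only "temp" remains, with value 1.5 (NaN skipped by default)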
xarray | 58 | xarray/core/merge.py | def merge(
objects: Iterable[DataArray | CoercibleMapping],
compat: CompatOptions = "no_conflicts",
join: JoinOptions = "outer",
fill_value: object = dtypes.NA,
combine_attrs: CombineAttrsOptions = "override",
) -> Dataset:
"""Merge any number of xarray objects into a single Dataset as variables.
Parameters
----------
objects : iterable of Dataset or iterable of DataArray or iterable of dict-like
Merge together all variables from these objects. If any of them are
DataArray objects, they must have a name.
compat : {"identical", "equals", "broadcast_equals", "no_conflicts", \
"override", "minimal"}, default: "no_conflicts"
String indicating how to compare variables of the same name for
potential conflicts:
- "identical": all values, dimensions and attributes must be the
same.
- "equals": all values and dimensions must be the same.
- "broadcast_equals": all values must be equal when variables are
broadcast against each other to ensure common dimensions.
- "no_conflicts": only values which are not null in both datasets
must be equal. The returned dataset then contains the combination
of all non-null values.
- "override": skip comparing and pick variable from first dataset
- "minimal": drop conflicting coordinates
join : {"outer", "inner", "left", "right", "exact", "override"}, default: "outer"
String indicating how to combine differing indexes in objects.
- "outer": use the union of object indexes
- "inner": use the intersection of object indexes
- "left": use indexes from the first object with each dimension
- "right": use indexes from the last object with each dimension
- "exact": instead of aligning, raise `ValueError` when indexes to be
aligned are not equal
- "override": if indexes are of same size, rewrite indexes to be
those of the first object with that dimension. Indexes for the same
dimension must have the same size in all objects.
fill_value : scalar or dict-like, optional
Value to use for newly missing values. If a dict-like, maps
variable names to fill values. Use a data array's name to
refer to its values.
combine_attrs : {"drop", "identical", "no_conflicts", "drop_conflicts", \
"override"} or callable, default: "override"
A callable or a string indicating how to combine attrs of the objects being
merged:
- "drop": empty attrs on returned Dataset.
- "identical": all attrs must be the same on every object.
- "no_conflicts": attrs from all objects are combined, any that have
the same name must also have the same value.
- "drop_conflicts": attrs from all objects are combined, any that have
the same name but different values are dropped.
- "override": skip comparing and copy attrs from the first dataset to
the result.
If a callable, it must expect a sequence of ``attrs`` dicts and a context object
as its only parameters.
Returns
-------
Dataset
Dataset with combined variables from each object.
Examples
--------
>>> x = xr.DataArray(
... [[1.0, 2.0], [3.0, 5.0]],
... dims=("lat", "lon"),
... coords={"lat": [35.0, 40.0], "lon": [100.0, 120.0]},
... name="var1",
... )
>>> y = xr.DataArray(
... [[5.0, 6.0], [7.0, 8.0]],
... dims=("lat", "lon"),
... coords={"lat": [35.0, 42.0], "lon": [100.0, 150.0]},
... name="var2",
... )
>>> z = xr.DataArray(
... [[0.0, 3.0], [4.0, 9.0]],
... dims=("time", "lon"),
... coords={"time": [30.0, 60.0], "lon": [100.0, 150.0]},
... name="var3",
... )
>>> x
<xarray.DataArray 'var1' (lat: 2, lon: 2)> Size: 32B
array([[1., 2.],
[3., 5.]])
Coordinates:
* lat (lat) float64 16B 35.0 40.0
* lon (lon) float64 16B 100.0 120.0
>>> y
<xarray.DataArray 'var2' (lat: 2, lon: 2)> Size: 32B
array([[5., 6.],
[7., 8.]])
Coordinates:
* lat (lat) float64 16B 35.0 42.0
* lon (lon) float64 16B 100.0 150.0
>>> z
<xarray.DataArray 'var3' (time: 2, lon: 2)> Size: 32B
array([[0., 3.],
[4., 9.]])
Coordinates:
* time (time) float64 16B 30.0 60.0
* lon (lon) float64 16B 100.0 150.0
>>> xr.merge([x, y, z])
<xarray.Dataset> Size: 256B
Dimensions: (lat: 3, lon: 3, time: 2)
Coordinates:
* lat (lat) float64 24B 35.0 40.0 42.0
* lon (lon) float64 24B 100.0 120.0 150.0
* time (time) float64 16B 30.0 60.0
Data variables:
var1 (lat, lon) float64 72B 1.0 2.0 nan 3.0 5.0 nan nan nan nan
var2 (lat, lon) float64 72B 5.0 nan 6.0 nan nan nan 7.0 nan 8.0
var3 (time, lon) float64 48B 0.0 nan 3.0 4.0 nan 9.0
>>> xr.merge([x, y, z], compat="identical")
<xarray.Dataset> Size: 256B
Dimensions: (lat: 3, lon: 3, time: 2)
Coordinates:
* lat (lat) float64 24B 35.0 40.0 42.0
* lon (lon) float64 24B 100.0 120.0 150.0
* time (time) float64 16B 30.0 60.0
Data variables:
var1 (lat, lon) float64 72B 1.0 2.0 nan 3.0 5.0 nan nan nan nan
var2 (lat, lon) float64 72B 5.0 nan 6.0 nan nan nan 7.0 nan 8.0
var3 (time, lon) float64 48B 0.0 nan 3.0 4.0 nan 9.0
>>> xr.merge([x, y, z], compat="equals")
<xarray.Dataset> Size: 256B
Dimensions: (lat: 3, lon: 3, time: 2)
Coordinates:
* lat (lat) float64 24B 35.0 40.0 42.0
* lon (lon) float64 24B 100.0 120.0 150.0
* time (time) float64 16B 30.0 60.0
Data variables:
var1 (lat, lon) float64 72B 1.0 2.0 nan 3.0 5.0 nan nan nan nan
var2 (lat, lon) float64 72B 5.0 nan 6.0 nan nan nan 7.0 nan 8.0
var3 (time, lon) float64 48B 0.0 nan 3.0 4.0 nan 9.0
>>> xr.merge([x, y, z], compat="equals", fill_value=-999.0)
<xarray.Dataset> Size: 256B
Dimensions: (lat: 3, lon: 3, time: 2)
Coordinates:
* lat (lat) float64 24B 35.0 40.0 42.0
* lon (lon) float64 24B 100.0 120.0 150.0
* time (time) float64 16B 30.0 60.0
Data variables:
var1 (lat, lon) float64 72B 1.0 2.0 -999.0 3.0 ... -999.0 -999.0 -999.0
var2 (lat, lon) float64 72B 5.0 -999.0 6.0 -999.0 ... 7.0 -999.0 8.0
var3 (time, lon) float64 48B 0.0 -999.0 3.0 4.0 -999.0 9.0
>>> xr.merge([x, y, z], join="override")
<xarray.Dataset> Size: 144B
Dimensions: (lat: 2, lon: 2, time: 2)
Coordinates:
* lat (lat) float64 16B 35.0 40.0
* lon (lon) float64 16B 100.0 120.0
* time (time) float64 16B 30.0 60.0
Data variables:
var1 (lat, lon) float64 32B 1.0 2.0 3.0 5.0
var2 (lat, lon) float64 32B 5.0 6.0 7.0 8.0
var3 (time, lon) float64 32B 0.0 3.0 4.0 9.0
>>> xr.merge([x, y, z], join="inner")
<xarray.Dataset> Size: 64B
Dimensions: (lat: 1, lon: 1, time: 2)
Coordinates:
* lat (lat) float64 8B 35.0
* lon (lon) float64 8B 100.0
* time (time) float64 16B 30.0 60.0
Data variables:
var1 (lat, lon) float64 8B 1.0
var2 (lat, lon) float64 8B 5.0
var3 (time, lon) float64 16B 0.0 4.0
>>> xr.merge([x, y, z], compat="identical", join="inner")
<xarray.Dataset> Size: 64B
Dimensions: (lat: 1, lon: 1, time: 2)
Coordinates:
* lat (lat) float64 8B 35.0
* lon (lon) float64 8B 100.0
* time (time) float64 16B 30.0 60.0
Data variables:
var1 (lat, lon) float64 8B 1.0
var2 (lat, lon) float64 8B 5.0
var3 (time, lon) float64 16B 0.0 4.0
>>> xr.merge([x, y, z], compat="broadcast_equals", join="outer")
<xarray.Dataset> Size: 256B
Dimensions: (lat: 3, lon: 3, time: 2)
Coordinates:
* lat (lat) float64 24B 35.0 40.0 42.0
* lon (lon) float64 24B 100.0 120.0 150.0
* time (time) float64 16B 30.0 60.0
Data variables:
var1 (lat, lon) float64 72B 1.0 2.0 nan 3.0 5.0 nan nan nan nan
var2 (lat, lon) float64 72B 5.0 nan 6.0 nan nan nan 7.0 nan 8.0
var3 (time, lon) float64 48B 0.0 nan 3.0 4.0 nan 9.0
>>> xr.merge([x, y, z], join="exact")
Traceback (most recent call last):
...
ValueError: cannot align objects with join='exact' where ...
Raises
------
xarray.MergeError
If any variables with the same name have conflicting values.
See also
--------
concat
combine_nested
combine_by_coords
"""
| /usr/src/app/target_test_cases/failed_tests_merge.txt | def merge(
objects: Iterable[DataArray | CoercibleMapping],
compat: CompatOptions = "no_conflicts",
join: JoinOptions = "outer",
fill_value: object = dtypes.NA,
combine_attrs: CombineAttrsOptions = "override",
) -> Dataset:
"""Merge any number of xarray objects into a single Dataset as variables.
Parameters
----------
objects : iterable of Dataset or iterable of DataArray or iterable of dict-like
Merge together all variables from these objects. If any of them are
DataArray objects, they must have a name.
compat : {"identical", "equals", "broadcast_equals", "no_conflicts", \
"override", "minimal"}, default: "no_conflicts"
String indicating how to compare variables of the same name for
potential conflicts:
- "identical": all values, dimensions and attributes must be the
same.
- "equals": all values and dimensions must be the same.
- "broadcast_equals": all values must be equal when variables are
broadcast against each other to ensure common dimensions.
- "no_conflicts": only values which are not null in both datasets
must be equal. The returned dataset then contains the combination
of all non-null values.
- "override": skip comparing and pick variable from first dataset
- "minimal": drop conflicting coordinates
join : {"outer", "inner", "left", "right", "exact", "override"}, default: "outer"
String indicating how to combine differing indexes in objects.
- "outer": use the union of object indexes
- "inner": use the intersection of object indexes
- "left": use indexes from the first object with each dimension
- "right": use indexes from the last object with each dimension
- "exact": instead of aligning, raise `ValueError` when indexes to be
aligned are not equal
- "override": if indexes are of same size, rewrite indexes to be
those of the first object with that dimension. Indexes for the same
dimension must have the same size in all objects.
fill_value : scalar or dict-like, optional
Value to use for newly missing values. If a dict-like, maps
variable names to fill values. Use a data array's name to
refer to its values.
combine_attrs : {"drop", "identical", "no_conflicts", "drop_conflicts", \
"override"} or callable, default: "override"
A callable or a string indicating how to combine attrs of the objects being
merged:
- "drop": empty attrs on returned Dataset.
- "identical": all attrs must be the same on every object.
- "no_conflicts": attrs from all objects are combined, any that have
the same name must also have the same value.
- "drop_conflicts": attrs from all objects are combined, any that have
the same name but different values are dropped.
- "override": skip comparing and copy attrs from the first dataset to
the result.
If a callable, it must expect a sequence of ``attrs`` dicts and a context object
as its only parameters.
Returns
-------
Dataset
Dataset with combined variables from each object.
Examples
--------
>>> x = xr.DataArray(
... [[1.0, 2.0], [3.0, 5.0]],
... dims=("lat", "lon"),
... coords={"lat": [35.0, 40.0], "lon": [100.0, 120.0]},
... name="var1",
... )
>>> y = xr.DataArray(
... [[5.0, 6.0], [7.0, 8.0]],
... dims=("lat", "lon"),
... coords={"lat": [35.0, 42.0], "lon": [100.0, 150.0]},
... name="var2",
... )
>>> z = xr.DataArray(
... [[0.0, 3.0], [4.0, 9.0]],
... dims=("time", "lon"),
... coords={"time": [30.0, 60.0], "lon": [100.0, 150.0]},
... name="var3",
... )
>>> x
<xarray.DataArray 'var1' (lat: 2, lon: 2)> Size: 32B
array([[1., 2.],
[3., 5.]])
Coordinates:
* lat (lat) float64 16B 35.0 40.0
* lon (lon) float64 16B 100.0 120.0
>>> y
<xarray.DataArray 'var2' (lat: 2, lon: 2)> Size: 32B
array([[5., 6.],
[7., 8.]])
Coordinates:
* lat (lat) float64 16B 35.0 42.0
* lon (lon) float64 16B 100.0 150.0
>>> z
<xarray.DataArray 'var3' (time: 2, lon: 2)> Size: 32B
array([[0., 3.],
[4., 9.]])
Coordinates:
* time (time) float64 16B 30.0 60.0
* lon (lon) float64 16B 100.0 150.0
>>> xr.merge([x, y, z])
<xarray.Dataset> Size: 256B
Dimensions: (lat: 3, lon: 3, time: 2)
Coordinates:
* lat (lat) float64 24B 35.0 40.0 42.0
* lon (lon) float64 24B 100.0 120.0 150.0
* time (time) float64 16B 30.0 60.0
Data variables:
var1 (lat, lon) float64 72B 1.0 2.0 nan 3.0 5.0 nan nan nan nan
var2 (lat, lon) float64 72B 5.0 nan 6.0 nan nan nan 7.0 nan 8.0
var3 (time, lon) float64 48B 0.0 nan 3.0 4.0 nan 9.0
>>> xr.merge([x, y, z], compat="identical")
<xarray.Dataset> Size: 256B
Dimensions: (lat: 3, lon: 3, time: 2)
Coordinates:
* lat (lat) float64 24B 35.0 40.0 42.0
* lon (lon) float64 24B 100.0 120.0 150.0
* time (time) float64 16B 30.0 60.0
Data variables:
var1 (lat, lon) float64 72B 1.0 2.0 nan 3.0 5.0 nan nan nan nan
var2 (lat, lon) float64 72B 5.0 nan 6.0 nan nan nan 7.0 nan 8.0
var3 (time, lon) float64 48B 0.0 nan 3.0 4.0 nan 9.0
>>> xr.merge([x, y, z], compat="equals")
<xarray.Dataset> Size: 256B
Dimensions: (lat: 3, lon: 3, time: 2)
Coordinates:
* lat (lat) float64 24B 35.0 40.0 42.0
* lon (lon) float64 24B 100.0 120.0 150.0
* time (time) float64 16B 30.0 60.0
Data variables:
var1 (lat, lon) float64 72B 1.0 2.0 nan 3.0 5.0 nan nan nan nan
var2 (lat, lon) float64 72B 5.0 nan 6.0 nan nan nan 7.0 nan 8.0
var3 (time, lon) float64 48B 0.0 nan 3.0 4.0 nan 9.0
>>> xr.merge([x, y, z], compat="equals", fill_value=-999.0)
<xarray.Dataset> Size: 256B
Dimensions: (lat: 3, lon: 3, time: 2)
Coordinates:
* lat (lat) float64 24B 35.0 40.0 42.0
* lon (lon) float64 24B 100.0 120.0 150.0
* time (time) float64 16B 30.0 60.0
Data variables:
var1 (lat, lon) float64 72B 1.0 2.0 -999.0 3.0 ... -999.0 -999.0 -999.0
var2 (lat, lon) float64 72B 5.0 -999.0 6.0 -999.0 ... 7.0 -999.0 8.0
var3 (time, lon) float64 48B 0.0 -999.0 3.0 4.0 -999.0 9.0
>>> xr.merge([x, y, z], join="override")
<xarray.Dataset> Size: 144B
Dimensions: (lat: 2, lon: 2, time: 2)
Coordinates:
* lat (lat) float64 16B 35.0 40.0
* lon (lon) float64 16B 100.0 120.0
* time (time) float64 16B 30.0 60.0
Data variables:
var1 (lat, lon) float64 32B 1.0 2.0 3.0 5.0
var2 (lat, lon) float64 32B 5.0 6.0 7.0 8.0
var3 (time, lon) float64 32B 0.0 3.0 4.0 9.0
>>> xr.merge([x, y, z], join="inner")
<xarray.Dataset> Size: 64B
Dimensions: (lat: 1, lon: 1, time: 2)
Coordinates:
* lat (lat) float64 8B 35.0
* lon (lon) float64 8B 100.0
* time (time) float64 16B 30.0 60.0
Data variables:
var1 (lat, lon) float64 8B 1.0
var2 (lat, lon) float64 8B 5.0
var3 (time, lon) float64 16B 0.0 4.0
>>> xr.merge([x, y, z], compat="identical", join="inner")
<xarray.Dataset> Size: 64B
Dimensions: (lat: 1, lon: 1, time: 2)
Coordinates:
* lat (lat) float64 8B 35.0
* lon (lon) float64 8B 100.0
* time (time) float64 16B 30.0 60.0
Data variables:
var1 (lat, lon) float64 8B 1.0
var2 (lat, lon) float64 8B 5.0
var3 (time, lon) float64 16B 0.0 4.0
>>> xr.merge([x, y, z], compat="broadcast_equals", join="outer")
<xarray.Dataset> Size: 256B
Dimensions: (lat: 3, lon: 3, time: 2)
Coordinates:
* lat (lat) float64 24B 35.0 40.0 42.0
* lon (lon) float64 24B 100.0 120.0 150.0
* time (time) float64 16B 30.0 60.0
Data variables:
var1 (lat, lon) float64 72B 1.0 2.0 nan 3.0 5.0 nan nan nan nan
var2 (lat, lon) float64 72B 5.0 nan 6.0 nan nan nan 7.0 nan 8.0
var3 (time, lon) float64 48B 0.0 nan 3.0 4.0 nan 9.0
>>> xr.merge([x, y, z], join="exact")
Traceback (most recent call last):
...
ValueError: cannot align objects with join='exact' where ...
Raises
------
xarray.MergeError
If any variables with the same name have conflicting values.
See also
--------
concat
combine_nested
combine_by_coords
"""
from xarray.core.coordinates import Coordinates
from xarray.core.dataarray import DataArray
from xarray.core.dataset import Dataset
dict_like_objects = []
for obj in objects:
if not isinstance(obj, DataArray | Dataset | Coordinates | dict):
raise TypeError(
"objects must be an iterable containing only "
"Dataset(s), DataArray(s), and dictionaries."
)
if isinstance(obj, DataArray):
obj = obj.to_dataset(promote_attrs=True)
elif isinstance(obj, Coordinates):
obj = obj.to_dataset()
dict_like_objects.append(obj)
merge_result = merge_core(
dict_like_objects,
compat,
join,
combine_attrs=combine_attrs,
fill_value=fill_value,
)
return Dataset._construct_direct(**merge_result._asdict())
| merge |
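A condensed sketch of the join behaviour documented above, using two deliberately mis-aligned coordinates (data invented):

import xarray as xr

x = xr.DataArray([1.0, 2.0], dims="lat", coords={"lat": [35.0, 40.0]}, name="var1")
y = xr.DataArray([5.0, 6.0], dims="lat", coords={"lat": [35.0, 42.0]}, name="var2")

outer = xr.merge([x, y])                # default join="outer": lat has 3 labels, NaN-padded
inner = xr.merge([x, y], join="inner")  # intersection: only lat == 35.0 survives
print(dict(outer.sizes), dict(inner.sizes))  # {'lat': 3} {'lat': 1}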
xarray | 59 | xarray/core/_aggregations.py | def min(
self,
dim: Dims = None,
*,
skipna: bool | None = None,
keep_attrs: bool | None = None,
**kwargs: Any,
) -> Self:
"""
Reduce this Dataset's data by applying ``min`` along some dimension(s).
Parameters
----------
dim : str, Iterable of Hashable, "..." or None, default: None
Name of dimension[s] along which to apply ``min``, e.g. ``dim="x"``
or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions.
skipna : bool or None, optional
If True, skip missing values (as marked by NaN). By default, only
skips missing values for float dtypes; other dtypes either do not
have a sentinel missing value (int) or ``skipna=True`` has not been
implemented (object, datetime64 or timedelta64).
keep_attrs : bool or None, optional
If True, ``attrs`` will be copied from the original
object to the new one. If False, the new object will be
returned without attributes.
**kwargs : Any
Additional keyword arguments passed on to the appropriate array
function for calculating ``min`` on this object's data.
These could include dask-specific kwargs like ``split_every``.
Returns
-------
reduced : Dataset
New Dataset with ``min`` applied to its data and the
indicated dimension(s) removed
See Also
--------
numpy.min
dask.array.min
DataArray.min
:ref:`agg`
User guide on reduction or aggregation operations.
Examples
--------
>>> da = xr.DataArray(
... np.array([1, 2, 3, 0, 2, np.nan]),
... dims="time",
... coords=dict(
... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)),
... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
... ),
... )
>>> ds = xr.Dataset(dict(da=da))
>>> ds
<xarray.Dataset> Size: 120B
Dimensions: (time: 6)
Coordinates:
* time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30
labels (time) <U1 24B 'a' 'b' 'c' 'c' 'b' 'a'
Data variables:
da (time) float64 48B 1.0 2.0 3.0 0.0 2.0 nan
>>> ds.min()
<xarray.Dataset> Size: 8B
Dimensions: ()
Data variables:
da float64 8B 0.0
Use ``skipna`` to control whether NaNs are ignored.
>>> ds.min(skipna=False)
<xarray.Dataset> Size: 8B
Dimensions: ()
Data variables:
da float64 8B nan
"""
| /usr/src/app/target_test_cases/failed_tests_min.txt | def min(
self,
dim: Dims = None,
*,
skipna: bool | None = None,
keep_attrs: bool | None = None,
**kwargs: Any,
) -> Self:
"""
Reduce this Dataset's data by applying ``min`` along some dimension(s).
Parameters
----------
dim : str, Iterable of Hashable, "..." or None, default: None
Name of dimension[s] along which to apply ``min``, e.g. ``dim="x"``
or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions.
skipna : bool or None, optional
If True, skip missing values (as marked by NaN). By default, only
skips missing values for float dtypes; other dtypes either do not
have a sentinel missing value (int) or ``skipna=True`` has not been
implemented (object, datetime64 or timedelta64).
keep_attrs : bool or None, optional
If True, ``attrs`` will be copied from the original
object to the new one. If False, the new object will be
returned without attributes.
**kwargs : Any
Additional keyword arguments passed on to the appropriate array
function for calculating ``min`` on this object's data.
These could include dask-specific kwargs like ``split_every``.
Returns
-------
reduced : Dataset
New Dataset with ``min`` applied to its data and the
indicated dimension(s) removed
See Also
--------
numpy.min
dask.array.min
DataArray.min
:ref:`agg`
User guide on reduction or aggregation operations.
Examples
--------
>>> da = xr.DataArray(
... np.array([1, 2, 3, 0, 2, np.nan]),
... dims="time",
... coords=dict(
... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)),
... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
... ),
... )
>>> ds = xr.Dataset(dict(da=da))
>>> ds
<xarray.Dataset> Size: 120B
Dimensions: (time: 6)
Coordinates:
* time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30
labels (time) <U1 24B 'a' 'b' 'c' 'c' 'b' 'a'
Data variables:
da (time) float64 48B 1.0 2.0 3.0 0.0 2.0 nan
>>> ds.min()
<xarray.Dataset> Size: 8B
Dimensions: ()
Data variables:
da float64 8B 0.0
Use ``skipna`` to control whether NaNs are ignored.
>>> ds.min(skipna=False)
<xarray.Dataset> Size: 8B
Dimensions: ()
Data variables:
da float64 8B nan
"""
return self.reduce(
duck_array_ops.min,
dim=dim,
skipna=skipna,
numeric_only=False,
keep_attrs=keep_attrs,
**kwargs,
)
| min |
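For symmetry with the max example above, a short sketch of min and its skipna flag (values invented):

import numpy as np
import xarray as xr

ds = xr.Dataset({"da": ("time", [1.0, 2.0, 3.0, 0.0, 2.0, np.nan])})
print(float(ds.min()["da"]))              # 0.0 (NaN skipped)
print(float(ds.min(skipna=False)["da"]))  # nan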
xarray | 60 | xarray/backends/api.py | def open_dataarray(
filename_or_obj: str | os.PathLike[Any] | BufferedIOBase | AbstractDataStore,
*,
engine: T_Engine | None = None,
chunks: T_Chunks | None = None,
cache: bool | None = None,
decode_cf: bool | None = None,
mask_and_scale: bool | None = None,
decode_times: bool | None = None,
decode_timedelta: bool | None = None,
use_cftime: bool | None = None,
concat_characters: bool | None = None,
decode_coords: Literal["coordinates", "all"] | bool | None = None,
drop_variables: str | Iterable[str] | None = None,
inline_array: bool = False,
chunked_array_type: str | None = None,
from_array_kwargs: dict[str, Any] | None = None,
backend_kwargs: dict[str, Any] | None = None,
**kwargs,
) -> DataArray:
"""Open an DataArray from a file or file-like object containing a single
data variable.
This is designed to read netCDF files with only one data variable. If
multiple variables are present then a ValueError is raised.
Parameters
----------
filename_or_obj : str, Path, file-like or DataStore
Strings and Path objects are interpreted as a path to a netCDF file
or an OpenDAP URL and opened with python-netCDF4, unless the filename
ends with .gz, in which case the file is gunzipped and opened with
scipy.io.netcdf (only netCDF3 supported). Byte-strings or file-like
objects are opened by scipy.io.netcdf (netCDF3) or h5py (netCDF4/HDF).
engine : {"netcdf4", "scipy", "pydap", "h5netcdf", "zarr", None}\
, installed backend \
or subclass of xarray.backends.BackendEntrypoint, optional
Engine to use when reading files. If not provided, the default engine
is chosen based on available dependencies, with a preference for
"netcdf4".
chunks : int, dict, 'auto' or None, default: None
If provided, used to load the data into dask arrays.
- ``chunks='auto'`` will use dask ``auto`` chunking taking into account the
engine preferred chunks.
- ``chunks=None`` skips using dask, which is generally faster for
small arrays.
- ``chunks=-1`` loads the data with dask using a single chunk for all arrays.
- ``chunks={}`` loads the data with dask using engine preferred chunks if
exposed by the backend, otherwise with a single chunk for all arrays.
See dask chunking for more details.
cache : bool, optional
If True, cache data loaded from the underlying datastore in memory as
NumPy arrays when accessed to avoid reading from the underlying data-
store multiple times. Defaults to True unless you specify the `chunks`
argument to use dask, in which case it defaults to False. Does not
change the behavior of coordinates corresponding to dimensions, which
always load their data from disk into a ``pandas.Index``.
decode_cf : bool, optional
Whether to decode these variables, assuming they were saved according
to CF conventions.
mask_and_scale : bool, optional
If True, replace array values equal to `_FillValue` with NA and scale
values according to the formula `original_values * scale_factor +
add_offset`, where `_FillValue`, `scale_factor` and `add_offset` are
taken from variable attributes (if they exist). If the `_FillValue` or
`missing_value` attribute contains multiple values a warning will be
issued and all array values matching one of the multiple values will
be replaced by NA. This keyword may not be supported by all the backends.
decode_times : bool, optional
If True, decode times encoded in the standard NetCDF datetime format
into datetime objects. Otherwise, leave them encoded as numbers.
This keyword may not be supported by all the backends.
decode_timedelta : bool, optional
If True, decode variables and coordinates with time units in
{"days", "hours", "minutes", "seconds", "milliseconds", "microseconds"}
into timedelta objects. If False, leave them encoded as numbers.
If None (default), assume the same value of decode_time.
This keyword may not be supported by all the backends.
use_cftime: bool, optional
Only relevant if encoded dates come from a standard calendar
(e.g. "gregorian", "proleptic_gregorian", "standard", or not
specified). If None (default), attempt to decode times to
``np.datetime64[ns]`` objects; if this is not possible, decode times to
``cftime.datetime`` objects. If True, always decode times to
``cftime.datetime`` objects, regardless of whether or not they can be
represented using ``np.datetime64[ns]`` objects. If False, always
decode times to ``np.datetime64[ns]`` objects; if this is not possible
raise an error. This keyword may not be supported by all the backends.
concat_characters : bool, optional
If True, concatenate along the last dimension of character arrays to
form string arrays. Dimensions will only be concatenated over (and
removed) if they have no corresponding variable and if they are only
used as the last dimension of character arrays.
This keyword may not be supported by all the backends.
decode_coords : bool or {"coordinates", "all"}, optional
Controls which variables are set as coordinate variables:
- "coordinates" or True: Set variables referred to in the
``'coordinates'`` attribute of the datasets or individual variables
as coordinate variables.
- "all": Set variables referred to in ``'grid_mapping'``, ``'bounds'`` and
other attributes as coordinate variables.
Only existing variables can be set as coordinates. Missing variables
will be silently ignored.
drop_variables: str or iterable of str, optional
A variable or list of variables to exclude from being parsed from the
dataset. This may be useful to drop variables with problems or
inconsistent values.
inline_array: bool, default: False
How to include the array in the dask task graph.
By default (``inline_array=False``) the array is included in a task by
itself, and each chunk refers to that task by its key. With
``inline_array=True``, Dask will instead inline the array directly
in the values of the task graph. See :py:func:`dask.array.from_array`.
chunked_array_type: str, optional
Which chunked array type to coerce the underlying data array to.
Defaults to 'dask' if installed, else whatever is registered via the `ChunkManagerEntrypoint` system.
Experimental API that should not be relied upon.
from_array_kwargs: dict
Additional keyword arguments passed on to the `ChunkManagerEntrypoint.from_array` method used to create
chunked arrays, via whichever chunk manager is specified through the `chunked_array_type` kwarg.
For example if :py:func:`dask.array.Array` objects are used for chunking, additional kwargs will be passed
to :py:func:`dask.array.from_array`. Experimental API that should not be relied upon.
backend_kwargs: dict
Additional keyword arguments passed on to the engine open function,
equivalent to `**kwargs`.
**kwargs: dict
Additional keyword arguments passed on to the engine open function.
For example:
- 'group': path to the netCDF4 group in the given file to open given as
a str, supported by "netcdf4", "h5netcdf", "zarr".
- 'lock': resource lock to use when reading data from disk. Only
relevant when using dask or another form of parallelism. By default,
appropriate locks are chosen to safely read and write files with the
currently active dask scheduler. Supported by "netcdf4", "h5netcdf",
"scipy".
See engine open function for kwargs accepted by each specific engine.
Notes
-----
This is designed to be fully compatible with `DataArray.to_netcdf`. Saving
using `DataArray.to_netcdf` and then loading with this function will
produce an identical result.
All parameters are passed directly to `xarray.open_dataset`. See that
documentation for further details.
See also
--------
open_dataset
"""
| /usr/src/app/target_test_cases/failed_tests_open_dataarray.txt | def open_dataarray(
filename_or_obj: str | os.PathLike[Any] | BufferedIOBase | AbstractDataStore,
*,
engine: T_Engine | None = None,
chunks: T_Chunks | None = None,
cache: bool | None = None,
decode_cf: bool | None = None,
mask_and_scale: bool | None = None,
decode_times: bool | None = None,
decode_timedelta: bool | None = None,
use_cftime: bool | None = None,
concat_characters: bool | None = None,
decode_coords: Literal["coordinates", "all"] | bool | None = None,
drop_variables: str | Iterable[str] | None = None,
inline_array: bool = False,
chunked_array_type: str | None = None,
from_array_kwargs: dict[str, Any] | None = None,
backend_kwargs: dict[str, Any] | None = None,
**kwargs,
) -> DataArray:
"""Open an DataArray from a file or file-like object containing a single
data variable.
This is designed to read netCDF files with only one data variable. If
multiple variables are present then a ValueError is raised.
Parameters
----------
filename_or_obj : str, Path, file-like or DataStore
Strings and Path objects are interpreted as a path to a netCDF file
or an OpenDAP URL and opened with python-netCDF4, unless the filename
ends with .gz, in which case the file is gunzipped and opened with
scipy.io.netcdf (only netCDF3 supported). Byte-strings or file-like
objects are opened by scipy.io.netcdf (netCDF3) or h5py (netCDF4/HDF).
engine : {"netcdf4", "scipy", "pydap", "h5netcdf", "zarr", None}\
, installed backend \
or subclass of xarray.backends.BackendEntrypoint, optional
Engine to use when reading files. If not provided, the default engine
is chosen based on available dependencies, with a preference for
"netcdf4".
chunks : int, dict, 'auto' or None, default: None
If provided, used to load the data into dask arrays.
- ``chunks='auto'`` will use dask ``auto`` chunking taking into account the
engine preferred chunks.
- ``chunks=None`` skips using dask, which is generally faster for
small arrays.
- ``chunks=-1`` loads the data with dask using a single chunk for all arrays.
- ``chunks={}`` loads the data with dask using engine preferred chunks if
exposed by the backend, otherwise with a single chunk for all arrays.
See dask chunking for more details.
cache : bool, optional
If True, cache data loaded from the underlying datastore in memory as
NumPy arrays when accessed to avoid reading from the underlying data-
store multiple times. Defaults to True unless you specify the `chunks`
argument to use dask, in which case it defaults to False. Does not
change the behavior of coordinates corresponding to dimensions, which
always load their data from disk into a ``pandas.Index``.
decode_cf : bool, optional
Whether to decode these variables, assuming they were saved according
to CF conventions.
mask_and_scale : bool, optional
If True, replace array values equal to `_FillValue` with NA and scale
values according to the formula `original_values * scale_factor +
add_offset`, where `_FillValue`, `scale_factor` and `add_offset` are
taken from variable attributes (if they exist). If the `_FillValue` or
`missing_value` attribute contains multiple values a warning will be
issued and all array values matching one of the multiple values will
be replaced by NA. This keyword may not be supported by all the backends.
decode_times : bool, optional
If True, decode times encoded in the standard NetCDF datetime format
into datetime objects. Otherwise, leave them encoded as numbers.
This keyword may not be supported by all the backends.
decode_timedelta : bool, optional
If True, decode variables and coordinates with time units in
{"days", "hours", "minutes", "seconds", "milliseconds", "microseconds"}
into timedelta objects. If False, leave them encoded as numbers.
If None (default), assume the same value of decode_time.
This keyword may not be supported by all the backends.
use_cftime: bool, optional
Only relevant if encoded dates come from a standard calendar
(e.g. "gregorian", "proleptic_gregorian", "standard", or not
specified). If None (default), attempt to decode times to
``np.datetime64[ns]`` objects; if this is not possible, decode times to
``cftime.datetime`` objects. If True, always decode times to
``cftime.datetime`` objects, regardless of whether or not they can be
represented using ``np.datetime64[ns]`` objects. If False, always
decode times to ``np.datetime64[ns]`` objects; if this is not possible
raise an error. This keyword may not be supported by all the backends.
concat_characters : bool, optional
If True, concatenate along the last dimension of character arrays to
form string arrays. Dimensions will only be concatenated over (and
removed) if they have no corresponding variable and if they are only
used as the last dimension of character arrays.
This keyword may not be supported by all the backends.
decode_coords : bool or {"coordinates", "all"}, optional
Controls which variables are set as coordinate variables:
- "coordinates" or True: Set variables referred to in the
``'coordinates'`` attribute of the datasets or individual variables
as coordinate variables.
- "all": Set variables referred to in ``'grid_mapping'``, ``'bounds'`` and
other attributes as coordinate variables.
Only existing variables can be set as coordinates. Missing variables
will be silently ignored.
drop_variables: str or iterable of str, optional
A variable or list of variables to exclude from being parsed from the
dataset. This may be useful to drop variables with problems or
inconsistent values.
inline_array: bool, default: False
How to include the array in the dask task graph.
By default (``inline_array=False``) the array is included in a task by
itself, and each chunk refers to that task by its key. With
``inline_array=True``, Dask will instead inline the array directly
in the values of the task graph. See :py:func:`dask.array.from_array`.
chunked_array_type: str, optional
Which chunked array type to coerce the underlying data array to.
Defaults to 'dask' if installed, else whatever is registered via the `ChunkManagerEntrypoint` system.
Experimental API that should not be relied upon.
from_array_kwargs: dict
Additional keyword arguments passed on to the `ChunkManagerEntrypoint.from_array` method used to create
chunked arrays, via whichever chunk manager is specified through the `chunked_array_type` kwarg.
For example if :py:func:`dask.array.Array` objects are used for chunking, additional kwargs will be passed
to :py:func:`dask.array.from_array`. Experimental API that should not be relied upon.
backend_kwargs: dict
Additional keyword arguments passed on to the engine open function,
equivalent to `**kwargs`.
**kwargs: dict
Additional keyword arguments passed on to the engine open function.
For example:
- 'group': path to the netCDF4 group in the given file to open given as
a str, supported by "netcdf4", "h5netcdf", "zarr".
- 'lock': resource lock to use when reading data from disk. Only
relevant when using dask or another form of parallelism. By default,
appropriate locks are chosen to safely read and write files with the
currently active dask scheduler. Supported by "netcdf4", "h5netcdf",
"scipy".
See engine open function for kwargs accepted by each specific engine.
Notes
-----
This is designed to be fully compatible with `DataArray.to_netcdf`. Saving
using `DataArray.to_netcdf` and then loading with this function will
produce an identical result.
All parameters are passed directly to `xarray.open_dataset`. See that
documentation for further details.
See also
--------
open_dataset
"""
dataset = open_dataset(
filename_or_obj,
decode_cf=decode_cf,
mask_and_scale=mask_and_scale,
decode_times=decode_times,
concat_characters=concat_characters,
decode_coords=decode_coords,
engine=engine,
chunks=chunks,
cache=cache,
drop_variables=drop_variables,
inline_array=inline_array,
chunked_array_type=chunked_array_type,
from_array_kwargs=from_array_kwargs,
backend_kwargs=backend_kwargs,
use_cftime=use_cftime,
decode_timedelta=decode_timedelta,
**kwargs,
)
if len(dataset.data_vars) != 1:
raise ValueError(
"Given file dataset contains more than one data "
"variable. Please read with xarray.open_dataset and "
"then select the variable you want."
)
else:
(data_array,) = dataset.data_vars.values()
data_array.set_close(dataset._close)
# Reset names if they were changed during saving
# to ensure that we can 'roundtrip' perfectly
if DATAARRAY_NAME in dataset.attrs:
data_array.name = dataset.attrs[DATAARRAY_NAME]
del dataset.attrs[DATAARRAY_NAME]
if data_array.name == DATAARRAY_VARIABLE:
data_array.name = None
return data_array
| open_dataarray |
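A round-trip sketch for open_dataarray, assuming a netCDF backend such as netcdf4 or scipy is available; the temporary file name is arbitrary:

import os
import tempfile
import xarray as xr

da = xr.DataArray([1.0, 2.0, 3.0], dims="x")  # unnamed, single data variable
path = os.path.join(tempfile.mkdtemp(), "single_var.nc")
da.to_netcdf(path)

roundtripped = xr.open_dataarray(path)
print(roundtripped.identical(da))  # expected True: name and values are restored
roundtripped.close()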
xarray | 61 | xarray/backends/api.py | def open_dataset(
filename_or_obj: str | os.PathLike[Any] | BufferedIOBase | AbstractDataStore,
*,
engine: T_Engine = None,
chunks: T_Chunks = None,
cache: bool | None = None,
decode_cf: bool | None = None,
mask_and_scale: bool | Mapping[str, bool] | None = None,
decode_times: bool | Mapping[str, bool] | None = None,
decode_timedelta: bool | Mapping[str, bool] | None = None,
use_cftime: bool | Mapping[str, bool] | None = None,
concat_characters: bool | Mapping[str, bool] | None = None,
decode_coords: Literal["coordinates", "all"] | bool | None = None,
drop_variables: str | Iterable[str] | None = None,
inline_array: bool = False,
chunked_array_type: str | None = None,
from_array_kwargs: dict[str, Any] | None = None,
backend_kwargs: dict[str, Any] | None = None,
**kwargs,
) -> Dataset:
"""Open and decode a dataset from a file or file-like object.
Parameters
----------
filename_or_obj : str, Path, file-like or DataStore
Strings and Path objects are interpreted as a path to a netCDF file
or an OpenDAP URL and opened with python-netCDF4, unless the filename
ends with .gz, in which case the file is gunzipped and opened with
scipy.io.netcdf (only netCDF3 supported). Byte-strings or file-like
objects are opened by scipy.io.netcdf (netCDF3) or h5py (netCDF4/HDF).
engine : {"netcdf4", "scipy", "pydap", "h5netcdf", "zarr", None}\
, installed backend \
or subclass of xarray.backends.BackendEntrypoint, optional
Engine to use when reading files. If not provided, the default engine
is chosen based on available dependencies, with a preference for
"netcdf4". A custom backend class (a subclass of ``BackendEntrypoint``)
can also be used.
chunks : int, dict, 'auto' or None, default: None
If provided, used to load the data into dask arrays.
- ``chunks="auto"`` will use dask ``auto`` chunking taking into account the
engine preferred chunks.
- ``chunks=None`` skips using dask, which is generally faster for
small arrays.
- ``chunks=-1`` loads the data with dask using a single chunk for all arrays.
- ``chunks={}`` loads the data with dask using the engine's preferred chunk
size, generally identical to the format's chunk size. If not available, a
single chunk for all arrays.
See dask chunking for more details.
cache : bool, optional
If True, cache data loaded from the underlying datastore in memory as
NumPy arrays when accessed to avoid reading from the underlying data-
store multiple times. Defaults to True unless you specify the `chunks`
argument to use dask, in which case it defaults to False. Does not
change the behavior of coordinates corresponding to dimensions, which
always load their data from disk into a ``pandas.Index``.
decode_cf : bool, optional
Whether to decode these variables, assuming they were saved according
to CF conventions.
mask_and_scale : bool or dict-like, optional
If True, replace array values equal to `_FillValue` with NA and scale
values according to the formula `original_values * scale_factor +
add_offset`, where `_FillValue`, `scale_factor` and `add_offset` are
taken from variable attributes (if they exist). If the `_FillValue` or
`missing_value` attribute contains multiple values a warning will be
issued and all array values matching one of the multiple values will
be replaced by NA. Pass a mapping, e.g. ``{"my_variable": False}``,
to toggle this feature per-variable individually.
This keyword may not be supported by all the backends.
decode_times : bool or dict-like, optional
If True, decode times encoded in the standard NetCDF datetime format
into datetime objects. Otherwise, leave them encoded as numbers.
Pass a mapping, e.g. ``{"my_variable": False}``,
to toggle this feature per-variable individually.
This keyword may not be supported by all the backends.
decode_timedelta : bool or dict-like, optional
If True, decode variables and coordinates with time units in
{"days", "hours", "minutes", "seconds", "milliseconds", "microseconds"}
into timedelta objects. If False, leave them encoded as numbers.
If None (default), assume the same value of decode_time.
Pass a mapping, e.g. ``{"my_variable": False}``,
to toggle this feature per-variable individually.
This keyword may not be supported by all the backends.
use_cftime: bool or dict-like, optional
Only relevant if encoded dates come from a standard calendar
(e.g. "gregorian", "proleptic_gregorian", "standard", or not
specified). If None (default), attempt to decode times to
``np.datetime64[ns]`` objects; if this is not possible, decode times to
``cftime.datetime`` objects. If True, always decode times to
``cftime.datetime`` objects, regardless of whether or not they can be
represented using ``np.datetime64[ns]`` objects. If False, always
decode times to ``np.datetime64[ns]`` objects; if this is not possible
raise an error. Pass a mapping, e.g. ``{"my_variable": False}``,
to toggle this feature per-variable individually.
This keyword may not be supported by all the backends.
concat_characters : bool or dict-like, optional
If True, concatenate along the last dimension of character arrays to
form string arrays. Dimensions will only be concatenated over (and
removed) if they have no corresponding variable and if they are only
used as the last dimension of character arrays.
Pass a mapping, e.g. ``{"my_variable": False}``,
to toggle this feature per-variable individually.
This keyword may not be supported by all the backends.
decode_coords : bool or {"coordinates", "all"}, optional
Controls which variables are set as coordinate variables:
- "coordinates" or True: Set variables referred to in the
``'coordinates'`` attribute of the datasets or individual variables
as coordinate variables.
- "all": Set variables referred to in ``'grid_mapping'``, ``'bounds'`` and
other attributes as coordinate variables.
Only existing variables can be set as coordinates. Missing variables
will be silently ignored.
drop_variables: str or iterable of str, optional
A variable or list of variables to exclude from being parsed from the
dataset. This may be useful to drop variables with problems or
inconsistent values.
inline_array: bool, default: False
How to include the array in the dask task graph.
By default (``inline_array=False``) the array is included in a task by
itself, and each chunk refers to that task by its key. With
``inline_array=True``, Dask will instead inline the array directly
in the values of the task graph. See :py:func:`dask.array.from_array`.
chunked_array_type: str, optional
Which chunked array type to coerce this datasets' arrays to.
Defaults to 'dask' if installed, else whatever is registered via the `ChunkManagerEntryPoint` system.
Experimental API that should not be relied upon.
from_array_kwargs: dict
Additional keyword arguments passed on to the `ChunkManagerEntrypoint.from_array` method used to create
chunked arrays, via whichever chunk manager is specified through the `chunked_array_type` kwarg.
For example if :py:func:`dask.array.Array` objects are used for chunking, additional kwargs will be passed
to :py:func:`dask.array.from_array`. Experimental API that should not be relied upon.
backend_kwargs: dict
Additional keyword arguments passed on to the engine open function,
equivalent to `**kwargs`.
**kwargs: dict
Additional keyword arguments passed on to the engine open function.
For example:
- 'group': path to the netCDF4 group in the given file to open given as
a str, supported by "netcdf4", "h5netcdf", "zarr".
- 'lock': resource lock to use when reading data from disk. Only
relevant when using dask or another form of parallelism. By default,
appropriate locks are chosen to safely read and write files with the
currently active dask scheduler. Supported by "netcdf4", "h5netcdf",
"scipy".
See engine open function for kwargs accepted by each specific engine.
Returns
-------
dataset : Dataset
The newly created dataset.
Notes
-----
``open_dataset`` opens the file with read-only access. When you modify
values of a Dataset, even one linked to files on disk, only the in-memory
copy you are manipulating in xarray is modified: the original file on disk
is never touched.
See Also
--------
open_mfdataset
"""
| /usr/src/app/target_test_cases/failed_tests_open_dataset.txt | def open_dataset(
filename_or_obj: str | os.PathLike[Any] | BufferedIOBase | AbstractDataStore,
*,
engine: T_Engine = None,
chunks: T_Chunks = None,
cache: bool | None = None,
decode_cf: bool | None = None,
mask_and_scale: bool | Mapping[str, bool] | None = None,
decode_times: bool | Mapping[str, bool] | None = None,
decode_timedelta: bool | Mapping[str, bool] | None = None,
use_cftime: bool | Mapping[str, bool] | None = None,
concat_characters: bool | Mapping[str, bool] | None = None,
decode_coords: Literal["coordinates", "all"] | bool | None = None,
drop_variables: str | Iterable[str] | None = None,
inline_array: bool = False,
chunked_array_type: str | None = None,
from_array_kwargs: dict[str, Any] | None = None,
backend_kwargs: dict[str, Any] | None = None,
**kwargs,
) -> Dataset:
"""Open and decode a dataset from a file or file-like object.
Parameters
----------
filename_or_obj : str, Path, file-like or DataStore
Strings and Path objects are interpreted as a path to a netCDF file
or an OpenDAP URL and opened with python-netCDF4, unless the filename
ends with .gz, in which case the file is gunzipped and opened with
scipy.io.netcdf (only netCDF3 supported). Byte-strings or file-like
objects are opened by scipy.io.netcdf (netCDF3) or h5py (netCDF4/HDF).
engine : {"netcdf4", "scipy", "pydap", "h5netcdf", "zarr", None}\
, installed backend \
or subclass of xarray.backends.BackendEntrypoint, optional
Engine to use when reading files. If not provided, the default engine
is chosen based on available dependencies, with a preference for
"netcdf4". A custom backend class (a subclass of ``BackendEntrypoint``)
can also be used.
chunks : int, dict, 'auto' or None, default: None
If provided, used to load the data into dask arrays.
- ``chunks="auto"`` will use dask ``auto`` chunking taking into account the
engine preferred chunks.
- ``chunks=None`` skips using dask, which is generally faster for
small arrays.
- ``chunks=-1`` loads the data with dask using a single chunk for all arrays.
- ``chunks={}`` loads the data with dask using the engine's preferred chunk
size, generally identical to the format's chunk size. If not available, a
single chunk for all arrays.
See dask chunking for more details.
cache : bool, optional
If True, cache data loaded from the underlying datastore in memory as
NumPy arrays when accessed to avoid reading from the underlying data-
store multiple times. Defaults to True unless you specify the `chunks`
argument to use dask, in which case it defaults to False. Does not
change the behavior of coordinates corresponding to dimensions, which
always load their data from disk into a ``pandas.Index``.
decode_cf : bool, optional
Whether to decode these variables, assuming they were saved according
to CF conventions.
mask_and_scale : bool or dict-like, optional
If True, replace array values equal to `_FillValue` with NA and scale
values according to the formula `original_values * scale_factor +
add_offset`, where `_FillValue`, `scale_factor` and `add_offset` are
taken from variable attributes (if they exist). If the `_FillValue` or
`missing_value` attribute contains multiple values a warning will be
issued and all array values matching one of the multiple values will
be replaced by NA. Pass a mapping, e.g. ``{"my_variable": False}``,
to toggle this feature per-variable individually.
This keyword may not be supported by all the backends.
decode_times : bool or dict-like, optional
If True, decode times encoded in the standard NetCDF datetime format
into datetime objects. Otherwise, leave them encoded as numbers.
Pass a mapping, e.g. ``{"my_variable": False}``,
to toggle this feature per-variable individually.
This keyword may not be supported by all the backends.
decode_timedelta : bool or dict-like, optional
If True, decode variables and coordinates with time units in
{"days", "hours", "minutes", "seconds", "milliseconds", "microseconds"}
into timedelta objects. If False, leave them encoded as numbers.
If None (default), assume the same value of decode_times.
Pass a mapping, e.g. ``{"my_variable": False}``,
to toggle this feature per-variable individually.
This keyword may not be supported by all the backends.
use_cftime: bool or dict-like, optional
Only relevant if encoded dates come from a standard calendar
(e.g. "gregorian", "proleptic_gregorian", "standard", or not
specified). If None (default), attempt to decode times to
``np.datetime64[ns]`` objects; if this is not possible, decode times to
``cftime.datetime`` objects. If True, always decode times to
``cftime.datetime`` objects, regardless of whether or not they can be
represented using ``np.datetime64[ns]`` objects. If False, always
decode times to ``np.datetime64[ns]`` objects; if this is not possible
raise an error. Pass a mapping, e.g. ``{"my_variable": False}``,
to toggle this feature per-variable individually.
This keyword may not be supported by all the backends.
concat_characters : bool or dict-like, optional
If True, concatenate along the last dimension of character arrays to
form string arrays. Dimensions will only be concatenated over (and
removed) if they have no corresponding variable and if they are only
used as the last dimension of character arrays.
Pass a mapping, e.g. ``{"my_variable": False}``,
to toggle this feature per-variable individually.
This keyword may not be supported by all the backends.
decode_coords : bool or {"coordinates", "all"}, optional
Controls which variables are set as coordinate variables:
- "coordinates" or True: Set variables referred to in the
``'coordinates'`` attribute of the datasets or individual variables
as coordinate variables.
- "all": Set variables referred to in ``'grid_mapping'``, ``'bounds'`` and
other attributes as coordinate variables.
Only existing variables can be set as coordinates. Missing variables
will be silently ignored.
drop_variables: str or iterable of str, optional
A variable or list of variables to exclude from being parsed from the
dataset. This may be useful to drop variables with problems or
inconsistent values.
inline_array: bool, default: False
How to include the array in the dask task graph.
By default (``inline_array=False``) the array is included in a task by
itself, and each chunk refers to that task by its key. With
``inline_array=True``, Dask will instead inline the array directly
in the values of the task graph. See :py:func:`dask.array.from_array`.
chunked_array_type: str, optional
Which chunked array type to coerce this datasets' arrays to.
Defaults to 'dask' if installed, else whatever is registered via the `ChunkManagerEntryPoint` system.
Experimental API that should not be relied upon.
from_array_kwargs: dict
Additional keyword arguments passed on to the `ChunkManagerEntrypoint.from_array` method used to create
chunked arrays, via whichever chunk manager is specified through the `chunked_array_type` kwarg.
For example if :py:func:`dask.array.Array` objects are used for chunking, additional kwargs will be passed
to :py:func:`dask.array.from_array`. Experimental API that should not be relied upon.
backend_kwargs: dict
Additional keyword arguments passed on to the engine open function,
equivalent to `**kwargs`.
**kwargs: dict
Additional keyword arguments passed on to the engine open function.
For example:
- 'group': path to the netCDF4 group in the given file to open given as
a str, supported by "netcdf4", "h5netcdf", "zarr".
- 'lock': resource lock to use when reading data from disk. Only
relevant when using dask or another form of parallelism. By default,
appropriate locks are chosen to safely read and write files with the
currently active dask scheduler. Supported by "netcdf4", "h5netcdf",
"scipy".
See engine open function for kwargs accepted by each specific engine.
Returns
-------
dataset : Dataset
The newly created dataset.
Notes
-----
``open_dataset`` opens the file with read-only access. When you modify
values of a Dataset, even one linked to files on disk, only the in-memory
copy you are manipulating in xarray is modified: the original file on disk
is never touched.
See Also
--------
open_mfdataset
"""
if cache is None:
cache = chunks is None
if backend_kwargs is not None:
kwargs.update(backend_kwargs)
if engine is None:
engine = plugins.guess_engine(filename_or_obj)
if from_array_kwargs is None:
from_array_kwargs = {}
backend = plugins.get_backend(engine)
decoders = _resolve_decoders_kwargs(
decode_cf,
open_backend_dataset_parameters=backend.open_dataset_parameters,
mask_and_scale=mask_and_scale,
decode_times=decode_times,
decode_timedelta=decode_timedelta,
concat_characters=concat_characters,
use_cftime=use_cftime,
decode_coords=decode_coords,
)
overwrite_encoded_chunks = kwargs.pop("overwrite_encoded_chunks", None)
backend_ds = backend.open_dataset(
filename_or_obj,
drop_variables=drop_variables,
**decoders,
**kwargs,
)
ds = _dataset_from_backend_dataset(
backend_ds,
filename_or_obj,
engine,
chunks,
cache,
overwrite_encoded_chunks,
inline_array,
chunked_array_type,
from_array_kwargs,
drop_variables=drop_variables,
**decoders,
**kwargs,
)
return ds
| open_dataset |
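Not part of the record above: a minimal, hypothetical usage sketch of ``xarray.open_dataset`` as documented there. The file name, chunk sizes and dropped variable are made-up placeholders, not values taken from the record.
# Hedged sketch: lazily open a (hypothetical) netCDF file as dask arrays,
# letting xarray choose the backend engine from the installed dependencies.
import xarray as xr

ds = xr.open_dataset(
    "example.nc",                 # hypothetical path
    chunks={"time": 100},         # load variables as dask arrays, 100 steps per chunk
    decode_times=True,            # decode CF time encodings into datetime64
    drop_variables=["qc_flag"],   # hypothetical variable to skip while parsing
)
print(ds)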
xarray | 62 | xarray/backends/api.py | def open_datatree(
filename_or_obj: str | os.PathLike[Any] | BufferedIOBase | AbstractDataStore,
engine: T_Engine = None,
**kwargs,
) -> DataTree:
"""
Open and decode a DataTree from a file or file-like object, creating one tree node for each group in the file.
Parameters
----------
filename_or_obj : str, Path, file-like, or DataStore
Strings and Path objects are interpreted as a path to a netCDF file or Zarr store.
engine : str, optional
Xarray backend engine to use. Valid options include `{"netcdf4", "h5netcdf", "zarr"}`.
**kwargs : dict
Additional keyword arguments passed to :py:func:`~xarray.open_dataset` for each group.
Returns
-------
xarray.DataTree
"""
| /usr/src/app/target_test_cases/failed_tests_open_datatree.txt | def open_datatree(
filename_or_obj: str | os.PathLike[Any] | BufferedIOBase | AbstractDataStore,
engine: T_Engine = None,
**kwargs,
) -> DataTree:
"""
Open and decode a DataTree from a file or file-like object, creating one tree node for each group in the file.
Parameters
----------
filename_or_obj : str, Path, file-like, or DataStore
Strings and Path objects are interpreted as a path to a netCDF file or Zarr store.
engine : str, optional
Xarray backend engine to use. Valid options include `{"netcdf4", "h5netcdf", "zarr"}`.
**kwargs : dict
Additional keyword arguments passed to :py:func:`~xarray.open_dataset` for each group.
Returns
-------
xarray.DataTree
"""
if engine is None:
engine = plugins.guess_engine(filename_or_obj)
backend = plugins.get_backend(engine)
return backend.open_datatree(filename_or_obj, **kwargs)
| open_datatree |
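Not part of the record above: a small, hypothetical sketch of ``xarray.open_datatree``. The file path, engine choice and group name are assumptions for illustration only.
# Hedged sketch: open every group of a (hypothetical) netCDF file as one DataTree.
import xarray as xr

tree = xr.open_datatree("example_groups.nc", engine="h5netcdf")  # hypothetical file
print(tree.groups)           # paths of the groups found in the file
forecast = tree["/forecast"]  # hypothetical group name; behaves like a nested node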
xarray | 63 | xarray/backends/api.py | def open_mfdataset(
paths: str | NestedSequence[str | os.PathLike],
chunks: T_Chunks | None = None,
concat_dim: (
str
| DataArray
| Index
| Sequence[str]
| Sequence[DataArray]
| Sequence[Index]
| None
) = None,
compat: CompatOptions = "no_conflicts",
preprocess: Callable[[Dataset], Dataset] | None = None,
engine: T_Engine | None = None,
data_vars: Literal["all", "minimal", "different"] | list[str] = "all",
coords="different",
combine: Literal["by_coords", "nested"] = "by_coords",
parallel: bool = False,
join: JoinOptions = "outer",
attrs_file: str | os.PathLike | None = None,
combine_attrs: CombineAttrsOptions = "override",
**kwargs,
) -> Dataset:
"""Open multiple files as a single dataset.
If combine='by_coords' then the function ``combine_by_coords`` is used to combine
the datasets into one before returning the result, and if combine='nested' then
``combine_nested`` is used. The filepaths must be structured according to which
combining function is used, the details of which are given in the documentation for
``combine_by_coords`` and ``combine_nested``. By default ``combine='by_coords'``
will be used. Requires dask to be installed. See documentation for
details on dask [1]_. Global attributes from the ``attrs_file`` are used
for the combined dataset.
Parameters
----------
paths : str or nested sequence of paths
Either a string glob in the form ``"path/to/my/files/*.nc"`` or an explicit list of
files to open. Paths can be given as strings or as pathlib Paths. If
concatenation along more than one dimension is desired, then ``paths`` must be a
nested list-of-lists (see ``combine_nested`` for details). (A string glob will
be expanded to a 1-dimensional list.)
chunks : int, dict, 'auto' or None, optional
Dictionary with keys given by dimension names and values given by chunk sizes.
In general, these should divide the dimensions of each dataset. If int, chunk
each dimension by ``chunks``. By default, chunks will be chosen to load entire
input files into memory at once. This has a major impact on performance: please
see the full documentation for more details [2]_. This argument is evaluated
on a per-file basis, so chunk sizes that span multiple files will be ignored.
concat_dim : str, DataArray, Index or a Sequence of these or None, optional
Dimensions to concatenate files along. You only need to provide this argument
if ``combine='nested'``, and if any of the dimensions along which you want to
concatenate is not a dimension in the original datasets, e.g., if you want to
stack a collection of 2D arrays along a third dimension. Set
``concat_dim=[..., None, ...]`` explicitly to disable concatenation along a
particular dimension. Default is None, which for a 1D list of filepaths is
equivalent to opening the files separately and then merging them with
``xarray.merge``.
combine : {"by_coords", "nested"}, optional
Whether ``xarray.combine_by_coords`` or ``xarray.combine_nested`` is used to
combine all the data. Default is to use ``xarray.combine_by_coords``.
compat : {"identical", "equals", "broadcast_equals", \
"no_conflicts", "override"}, default: "no_conflicts"
String indicating how to compare variables of the same name for
potential conflicts when merging:
* "broadcast_equals": all values must be equal when variables are
broadcast against each other to ensure common dimensions.
* "equals": all values and dimensions must be the same.
* "identical": all values, dimensions and attributes must be the
same.
* "no_conflicts": only values which are not null in both datasets
must be equal. The returned dataset then contains the combination
of all non-null values.
* "override": skip comparing and pick variable from first dataset
preprocess : callable, optional
If provided, call this function on each dataset prior to concatenation.
You can find the file-name from which each dataset was loaded in
``ds.encoding["source"]``.
engine : {"netcdf4", "scipy", "pydap", "h5netcdf", "zarr", None}\
, installed backend \
or subclass of xarray.backends.BackendEntrypoint, optional
Engine to use when reading files. If not provided, the default engine
is chosen based on available dependencies, with a preference for
"netcdf4".
data_vars : {"minimal", "different", "all"} or list of str, default: "all"
These data variables will be concatenated together:
* "minimal": Only data variables in which the dimension already
appears are included.
* "different": Data variables which are not equal (ignoring
attributes) across all datasets are also concatenated (as well as
all for which dimension already appears). Beware: this option may
load the data payload of data variables into memory if they are not
already loaded.
* "all": All data variables will be concatenated.
* list of str: The listed data variables will be concatenated, in
addition to the "minimal" data variables.
coords : {"minimal", "different", "all"} or list of str, optional
These coordinate variables will be concatenated together:
* "minimal": Only coordinates in which the dimension already appears
are included.
* "different": Coordinates which are not equal (ignoring attributes)
across all datasets are also concatenated (as well as all for which
dimension already appears). Beware: this option may load the data
payload of coordinate variables into memory if they are not already
loaded.
* "all": All coordinate variables will be concatenated, except
those corresponding to other dimensions.
* list of str: The listed coordinate variables will be concatenated,
in addition to the "minimal" coordinates.
parallel : bool, default: False
If True, the open and preprocess steps of this function will be
performed in parallel using ``dask.delayed``. Default is False.
join : {"outer", "inner", "left", "right", "exact", "override"}, default: "outer"
String indicating how to combine differing indexes
(excluding concat_dim) in objects
- "outer": use the union of object indexes
- "inner": use the intersection of object indexes
- "left": use indexes from the first object with each dimension
- "right": use indexes from the last object with each dimension
- "exact": instead of aligning, raise `ValueError` when indexes to be
aligned are not equal
- "override": if indexes are of same size, rewrite indexes to be
those of the first object with that dimension. Indexes for the same
dimension must have the same size in all objects.
attrs_file : str or path-like, optional
Path of the file used to read global attributes from.
By default global attributes are read from the first file provided,
with wildcard matches sorted by filename.
combine_attrs : {"drop", "identical", "no_conflicts", "drop_conflicts", \
"override"} or callable, default: "override"
A callable or a string indicating how to combine attrs of the objects being
merged:
- "drop": empty attrs on returned Dataset.
- "identical": all attrs must be the same on every object.
- "no_conflicts": attrs from all objects are combined, any that have
the same name must also have the same value.
- "drop_conflicts": attrs from all objects are combined, any that have
the same name but different values are dropped.
- "override": skip comparing and copy attrs from the first dataset to
the result.
If a callable, it must expect a sequence of ``attrs`` dicts and a context object
as its only parameters.
**kwargs : optional
Additional arguments passed on to :py:func:`xarray.open_dataset`. For an
overview of some of the possible options, see the documentation of
:py:func:`xarray.open_dataset`
Returns
-------
xarray.Dataset
Notes
-----
``open_mfdataset`` opens files with read-only access. When you modify values
of a Dataset, even one linked to files on disk, only the in-memory copy you
are manipulating in xarray is modified: the original file on disk is never
touched.
See Also
--------
combine_by_coords
combine_nested
open_dataset
Examples
--------
A user might want to pass additional arguments into ``preprocess`` when
applying some operation to many individual files that are being opened. One route
to do this is through the use of ``functools.partial``.
>>> from functools import partial
>>> def _preprocess(x, lon_bnds, lat_bnds):
... return x.sel(lon=slice(*lon_bnds), lat=slice(*lat_bnds))
...
>>> lon_bnds, lat_bnds = (-110, -105), (40, 45)
>>> partial_func = partial(_preprocess, lon_bnds=lon_bnds, lat_bnds=lat_bnds)
>>> ds = xr.open_mfdataset(
... "file_*.nc", concat_dim="time", preprocess=partial_func
... ) # doctest: +SKIP
It is also possible to use any argument to ``open_dataset`` together
with ``open_mfdataset``, such as for example ``drop_variables``:
>>> ds = xr.open_mfdataset(
... "file.nc", drop_variables=["varname_1", "varname_2"] # any list of vars
... ) # doctest: +SKIP
References
----------
.. [1] https://docs.xarray.dev/en/stable/dask.html
.. [2] https://docs.xarray.dev/en/stable/dask.html#chunking-and-performance
"""
| /usr/src/app/target_test_cases/failed_tests_open_mfdataset.txt | def open_mfdataset(
paths: str | NestedSequence[str | os.PathLike],
chunks: T_Chunks | None = None,
concat_dim: (
str
| DataArray
| Index
| Sequence[str]
| Sequence[DataArray]
| Sequence[Index]
| None
) = None,
compat: CompatOptions = "no_conflicts",
preprocess: Callable[[Dataset], Dataset] | None = None,
engine: T_Engine | None = None,
data_vars: Literal["all", "minimal", "different"] | list[str] = "all",
coords="different",
combine: Literal["by_coords", "nested"] = "by_coords",
parallel: bool = False,
join: JoinOptions = "outer",
attrs_file: str | os.PathLike | None = None,
combine_attrs: CombineAttrsOptions = "override",
**kwargs,
) -> Dataset:
"""Open multiple files as a single dataset.
If combine='by_coords' then the function ``combine_by_coords`` is used to combine
the datasets into one before returning the result, and if combine='nested' then
``combine_nested`` is used. The filepaths must be structured according to which
combining function is used, the details of which are given in the documentation for
``combine_by_coords`` and ``combine_nested``. By default ``combine='by_coords'``
will be used. Requires dask to be installed. See documentation for
details on dask [1]_. Global attributes from the ``attrs_file`` are used
for the combined dataset.
Parameters
----------
paths : str or nested sequence of paths
Either a string glob in the form ``"path/to/my/files/*.nc"`` or an explicit list of
files to open. Paths can be given as strings or as pathlib Paths. If
concatenation along more than one dimension is desired, then ``paths`` must be a
nested list-of-lists (see ``combine_nested`` for details). (A string glob will
be expanded to a 1-dimensional list.)
chunks : int, dict, 'auto' or None, optional
Dictionary with keys given by dimension names and values given by chunk sizes.
In general, these should divide the dimensions of each dataset. If int, chunk
each dimension by ``chunks``. By default, chunks will be chosen to load entire
input files into memory at once. This has a major impact on performance: please
see the full documentation for more details [2]_. This argument is evaluated
on a per-file basis, so chunk sizes that span multiple files will be ignored.
concat_dim : str, DataArray, Index or a Sequence of these or None, optional
Dimensions to concatenate files along. You only need to provide this argument
if ``combine='nested'``, and if any of the dimensions along which you want to
concatenate is not a dimension in the original datasets, e.g., if you want to
stack a collection of 2D arrays along a third dimension. Set
``concat_dim=[..., None, ...]`` explicitly to disable concatenation along a
particular dimension. Default is None, which for a 1D list of filepaths is
equivalent to opening the files separately and then merging them with
``xarray.merge``.
combine : {"by_coords", "nested"}, optional
Whether ``xarray.combine_by_coords`` or ``xarray.combine_nested`` is used to
combine all the data. Default is to use ``xarray.combine_by_coords``.
compat : {"identical", "equals", "broadcast_equals", \
"no_conflicts", "override"}, default: "no_conflicts"
String indicating how to compare variables of the same name for
potential conflicts when merging:
* "broadcast_equals": all values must be equal when variables are
broadcast against each other to ensure common dimensions.
* "equals": all values and dimensions must be the same.
* "identical": all values, dimensions and attributes must be the
same.
* "no_conflicts": only values which are not null in both datasets
must be equal. The returned dataset then contains the combination
of all non-null values.
* "override": skip comparing and pick variable from first dataset
preprocess : callable, optional
If provided, call this function on each dataset prior to concatenation.
You can find the file-name from which each dataset was loaded in
``ds.encoding["source"]``.
engine : {"netcdf4", "scipy", "pydap", "h5netcdf", "zarr", None}\
, installed backend \
or subclass of xarray.backends.BackendEntrypoint, optional
Engine to use when reading files. If not provided, the default engine
is chosen based on available dependencies, with a preference for
"netcdf4".
data_vars : {"minimal", "different", "all"} or list of str, default: "all"
These data variables will be concatenated together:
* "minimal": Only data variables in which the dimension already
appears are included.
* "different": Data variables which are not equal (ignoring
attributes) across all datasets are also concatenated (as well as
all for which dimension already appears). Beware: this option may
load the data payload of data variables into memory if they are not
already loaded.
* "all": All data variables will be concatenated.
* list of str: The listed data variables will be concatenated, in
addition to the "minimal" data variables.
coords : {"minimal", "different", "all"} or list of str, optional
These coordinate variables will be concatenated together:
* "minimal": Only coordinates in which the dimension already appears
are included.
* "different": Coordinates which are not equal (ignoring attributes)
across all datasets are also concatenated (as well as all for which
dimension already appears). Beware: this option may load the data
payload of coordinate variables into memory if they are not already
loaded.
* "all": All coordinate variables will be concatenated, except
those corresponding to other dimensions.
* list of str: The listed coordinate variables will be concatenated,
in addition to the "minimal" coordinates.
parallel : bool, default: False
If True, the open and preprocess steps of this function will be
performed in parallel using ``dask.delayed``. Default is False.
join : {"outer", "inner", "left", "right", "exact", "override"}, default: "outer"
String indicating how to combine differing indexes
(excluding concat_dim) in objects
- "outer": use the union of object indexes
- "inner": use the intersection of object indexes
- "left": use indexes from the first object with each dimension
- "right": use indexes from the last object with each dimension
- "exact": instead of aligning, raise `ValueError` when indexes to be
aligned are not equal
- "override": if indexes are of same size, rewrite indexes to be
those of the first object with that dimension. Indexes for the same
dimension must have the same size in all objects.
attrs_file : str or path-like, optional
Path of the file used to read global attributes from.
By default global attributes are read from the first file provided,
with wildcard matches sorted by filename.
combine_attrs : {"drop", "identical", "no_conflicts", "drop_conflicts", \
"override"} or callable, default: "override"
A callable or a string indicating how to combine attrs of the objects being
merged:
- "drop": empty attrs on returned Dataset.
- "identical": all attrs must be the same on every object.
- "no_conflicts": attrs from all objects are combined, any that have
the same name must also have the same value.
- "drop_conflicts": attrs from all objects are combined, any that have
the same name but different values are dropped.
- "override": skip comparing and copy attrs from the first dataset to
the result.
If a callable, it must expect a sequence of ``attrs`` dicts and a context object
as its only parameters.
**kwargs : optional
Additional arguments passed on to :py:func:`xarray.open_dataset`. For an
overview of some of the possible options, see the documentation of
:py:func:`xarray.open_dataset`
Returns
-------
xarray.Dataset
Notes
-----
``open_mfdataset`` opens files with read-only access. When you modify values
of a Dataset, even one linked to files on disk, only the in-memory copy you
are manipulating in xarray is modified: the original file on disk is never
touched.
See Also
--------
combine_by_coords
combine_nested
open_dataset
Examples
--------
A user might want to pass additional arguments into ``preprocess`` when
applying some operation to many individual files that are being opened. One route
to do this is through the use of ``functools.partial``.
>>> from functools import partial
>>> def _preprocess(x, lon_bnds, lat_bnds):
... return x.sel(lon=slice(*lon_bnds), lat=slice(*lat_bnds))
...
>>> lon_bnds, lat_bnds = (-110, -105), (40, 45)
>>> partial_func = partial(_preprocess, lon_bnds=lon_bnds, lat_bnds=lat_bnds)
>>> ds = xr.open_mfdataset(
... "file_*.nc", concat_dim="time", preprocess=partial_func
... ) # doctest: +SKIP
It is also possible to use any argument to ``open_dataset`` together
with ``open_mfdataset``, such as for example ``drop_variables``:
>>> ds = xr.open_mfdataset(
... "file.nc", drop_variables=["varname_1", "varname_2"] # any list of vars
... ) # doctest: +SKIP
References
----------
.. [1] https://docs.xarray.dev/en/stable/dask.html
.. [2] https://docs.xarray.dev/en/stable/dask.html#chunking-and-performance
"""
paths = _find_absolute_paths(paths, engine=engine, **kwargs)
if not paths:
raise OSError("no files to open")
if combine == "nested":
if isinstance(concat_dim, str | DataArray) or concat_dim is None:
concat_dim = [concat_dim] # type: ignore[assignment]
# This creates a flat list which is easier to iterate over, whilst
# encoding the originally-supplied structure as "ids".
# The "ids" are not used at all if combine='by_coords`.
combined_ids_paths = _infer_concat_order_from_positions(paths)
ids, paths = (
list(combined_ids_paths.keys()),
list(combined_ids_paths.values()),
)
elif concat_dim is not None:
raise ValueError(
"When combine='by_coords', passing a value for `concat_dim` has no "
"effect. To manually combine along a specific dimension you should "
"instead specify combine='nested' along with a value for `concat_dim`.",
)
open_kwargs = dict(engine=engine, chunks=chunks or {}, **kwargs)
if parallel:
import dask
# wrap the open_dataset, getattr, and preprocess with delayed
open_ = dask.delayed(open_dataset)
getattr_ = dask.delayed(getattr)
if preprocess is not None:
preprocess = dask.delayed(preprocess)
else:
open_ = open_dataset
getattr_ = getattr
datasets = [open_(p, **open_kwargs) for p in paths]
closers = [getattr_(ds, "_close") for ds in datasets]
if preprocess is not None:
datasets = [preprocess(ds) for ds in datasets]
if parallel:
# calling compute here will return the datasets/file_objs lists,
# the underlying datasets will still be stored as dask arrays
datasets, closers = dask.compute(datasets, closers)
# Combine all datasets, closing them in case of a ValueError
try:
if combine == "nested":
# Combined nested list by successive concat and merge operations
# along each dimension, using structure given by "ids"
combined = _nested_combine(
datasets,
concat_dims=concat_dim,
compat=compat,
data_vars=data_vars,
coords=coords,
ids=ids,
join=join,
combine_attrs=combine_attrs,
)
elif combine == "by_coords":
# Redo ordering from coordinates, ignoring how they were ordered
# previously
combined = combine_by_coords(
datasets,
compat=compat,
data_vars=data_vars,
coords=coords,
join=join,
combine_attrs=combine_attrs,
)
else:
raise ValueError(
f"{combine} is an invalid option for the keyword argument"
" ``combine``"
)
except ValueError:
for ds in datasets:
ds.close()
raise
combined.set_close(partial(_multi_file_closer, closers))
# read global attributes from the attrs_file or from the first dataset
if attrs_file is not None:
if isinstance(attrs_file, os.PathLike):
attrs_file = cast(str, os.fspath(attrs_file))
combined.attrs = datasets[paths.index(attrs_file)].attrs
return combined
| open_mfdataset |
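Not part of the record above: a minimal, hypothetical sketch of ``xarray.open_mfdataset``. The glob pattern and chunk sizes are placeholders chosen for illustration.
# Hedged sketch: combine many (hypothetical) per-day files into a single dataset
# by aligning them on their coordinate values.
import xarray as xr

ds = xr.open_mfdataset(
    "data/temperature_*.nc",   # hypothetical glob of input files
    combine="by_coords",       # order and merge files using their coordinates
    parallel=True,             # open/preprocess each file via dask.delayed
    chunks={"time": 24},       # per-file chunking of the time dimension
)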
xarray | 64 | xarray/backends/zarr.py | def open_zarr(
store,
group=None,
synchronizer=None,
chunks="auto",
decode_cf=True,
mask_and_scale=True,
decode_times=True,
concat_characters=True,
decode_coords=True,
drop_variables=None,
consolidated=None,
overwrite_encoded_chunks=False,
chunk_store=None,
storage_options=None,
decode_timedelta=None,
use_cftime=None,
zarr_version=None,
chunked_array_type: str | None = None,
from_array_kwargs: dict[str, Any] | None = None,
**kwargs,
):
"""Load and decode a dataset from a Zarr store.
The `store` object should be a valid store for a Zarr group. `store`
variables must contain dimension metadata encoded in the
`_ARRAY_DIMENSIONS` attribute or must have NCZarr format.
Parameters
----------
store : MutableMapping or str
A MutableMapping where a Zarr Group has been stored or a path to a
directory in file system where a Zarr DirectoryStore has been stored.
synchronizer : object, optional
Array synchronizer provided to zarr
group : str, optional
Group path. (a.k.a. `path` in zarr terminology.)
chunks : int, dict, 'auto' or None, default: 'auto'
If provided, used to load the data into dask arrays.
- ``chunks='auto'`` will use dask ``auto`` chunking taking into account the
engine preferred chunks.
- ``chunks=None`` skips using dask, which is generally faster for
small arrays.
- ``chunks=-1`` loads the data with dask using a single chunk for all arrays.
- ``chunks={}`` loads the data with dask using engine preferred chunks if
exposed by the backend, otherwise with a single chunk for all arrays.
See dask chunking for more details.
overwrite_encoded_chunks : bool, optional
Whether to drop the zarr chunks encoded for each variable when a
dataset is loaded with specified chunk sizes (default: False)
decode_cf : bool, optional
Whether to decode these variables, assuming they were saved according
to CF conventions.
mask_and_scale : bool, optional
If True, replace array values equal to `_FillValue` with NA and scale
values according to the formula `original_values * scale_factor +
add_offset`, where `_FillValue`, `scale_factor` and `add_offset` are
taken from variable attributes (if they exist). If the `_FillValue` or
`missing_value` attribute contains multiple values a warning will be
issued and all array values matching one of the multiple values will
be replaced by NA.
decode_times : bool, optional
If True, decode times encoded in the standard NetCDF datetime format
into datetime objects. Otherwise, leave them encoded as numbers.
concat_characters : bool, optional
If True, concatenate along the last dimension of character arrays to
form string arrays. Dimensions will only be concatenated over (and
removed) if they have no corresponding variable and if they are only
used as the last dimension of character arrays.
decode_coords : bool, optional
If True, decode the 'coordinates' attribute to identify coordinates in
the resulting dataset.
drop_variables : str or iterable, optional
A variable or list of variables to exclude from being parsed from the
dataset. This may be useful to drop variables with problems or
inconsistent values.
consolidated : bool, optional
Whether to open the store using zarr's consolidated metadata
capability. Only works for stores that have already been consolidated.
By default (`consolidated=None`), attempts to read consolidated metadata,
falling back to read non-consolidated metadata if that fails.
When the experimental ``zarr_version=3``, ``consolidated`` must be
either ``None`` or ``False``.
chunk_store : MutableMapping, optional
A separate Zarr store only for chunk data.
storage_options : dict, optional
Any additional parameters for the storage backend (ignored for local
paths).
decode_timedelta : bool, optional
If True, decode variables and coordinates with time units in
{'days', 'hours', 'minutes', 'seconds', 'milliseconds', 'microseconds'}
into timedelta objects. If False, leave them encoded as numbers.
If None (default), assume the same value of decode_times.
use_cftime : bool, optional
Only relevant if encoded dates come from a standard calendar
(e.g. "gregorian", "proleptic_gregorian", "standard", or not
specified). If None (default), attempt to decode times to
``np.datetime64[ns]`` objects; if this is not possible, decode times to
``cftime.datetime`` objects. If True, always decode times to
``cftime.datetime`` objects, regardless of whether or not they can be
represented using ``np.datetime64[ns]`` objects. If False, always
decode times to ``np.datetime64[ns]`` objects; if this is not possible
raise an error.
zarr_version : int or None, optional
The desired zarr spec version to target (currently 2 or 3). The default
of None will attempt to determine the zarr version from ``store`` when
possible, otherwise defaulting to 2.
chunked_array_type: str, optional
Which chunked array type to coerce this datasets' arrays to.
Defaults to 'dask' if installed, else whatever is registered via the `ChunkManagerEntryPoint` system.
Experimental API that should not be relied upon.
from_array_kwargs: dict, optional
Additional keyword arguments passed on to the `ChunkManagerEntrypoint.from_array` method used to create
chunked arrays, via whichever chunk manager is specified through the `chunked_array_type` kwarg.
Defaults to {'manager': 'dask'}, meaning additional kwargs will be passed eventually to
:py:func:`dask.array.from_array`. Experimental API that should not be relied upon.
Returns
-------
dataset : Dataset
The newly created dataset.
See Also
--------
open_dataset
open_mfdataset
References
----------
http://zarr.readthedocs.io/
"""
| /usr/src/app/target_test_cases/failed_tests_open_zarr.txt | def open_zarr(
store,
group=None,
synchronizer=None,
chunks="auto",
decode_cf=True,
mask_and_scale=True,
decode_times=True,
concat_characters=True,
decode_coords=True,
drop_variables=None,
consolidated=None,
overwrite_encoded_chunks=False,
chunk_store=None,
storage_options=None,
decode_timedelta=None,
use_cftime=None,
zarr_version=None,
chunked_array_type: str | None = None,
from_array_kwargs: dict[str, Any] | None = None,
**kwargs,
):
"""Load and decode a dataset from a Zarr store.
The `store` object should be a valid store for a Zarr group. `store`
variables must contain dimension metadata encoded in the
`_ARRAY_DIMENSIONS` attribute or must have NCZarr format.
Parameters
----------
store : MutableMapping or str
A MutableMapping where a Zarr Group has been stored or a path to a
directory in file system where a Zarr DirectoryStore has been stored.
synchronizer : object, optional
Array synchronizer provided to zarr
group : str, optional
Group path. (a.k.a. `path` in zarr terminology.)
chunks : int, dict, 'auto' or None, default: 'auto'
If provided, used to load the data into dask arrays.
- ``chunks='auto'`` will use dask ``auto`` chunking taking into account the
engine preferred chunks.
- ``chunks=None`` skips using dask, which is generally faster for
small arrays.
- ``chunks=-1`` loads the data with dask using a single chunk for all arrays.
- ``chunks={}`` loads the data with dask using engine preferred chunks if
exposed by the backend, otherwise with a single chunk for all arrays.
See dask chunking for more details.
overwrite_encoded_chunks : bool, optional
Whether to drop the zarr chunks encoded for each variable when a
dataset is loaded with specified chunk sizes (default: False)
decode_cf : bool, optional
Whether to decode these variables, assuming they were saved according
to CF conventions.
mask_and_scale : bool, optional
If True, replace array values equal to `_FillValue` with NA and scale
values according to the formula `original_values * scale_factor +
add_offset`, where `_FillValue`, `scale_factor` and `add_offset` are
taken from variable attributes (if they exist). If the `_FillValue` or
`missing_value` attribute contains multiple values a warning will be
issued and all array values matching one of the multiple values will
be replaced by NA.
decode_times : bool, optional
If True, decode times encoded in the standard NetCDF datetime format
into datetime objects. Otherwise, leave them encoded as numbers.
concat_characters : bool, optional
If True, concatenate along the last dimension of character arrays to
form string arrays. Dimensions will only be concatenated over (and
removed) if they have no corresponding variable and if they are only
used as the last dimension of character arrays.
decode_coords : bool, optional
If True, decode the 'coordinates' attribute to identify coordinates in
the resulting dataset.
drop_variables : str or iterable, optional
A variable or list of variables to exclude from being parsed from the
dataset. This may be useful to drop variables with problems or
inconsistent values.
consolidated : bool, optional
Whether to open the store using zarr's consolidated metadata
capability. Only works for stores that have already been consolidated.
By default (`consolidated=None`), attempts to read consolidated metadata,
falling back to read non-consolidated metadata if that fails.
When the experimental ``zarr_version=3``, ``consolidated`` must be
either ``None`` or ``False``.
chunk_store : MutableMapping, optional
A separate Zarr store only for chunk data.
storage_options : dict, optional
Any additional parameters for the storage backend (ignored for local
paths).
decode_timedelta : bool, optional
If True, decode variables and coordinates with time units in
{'days', 'hours', 'minutes', 'seconds', 'milliseconds', 'microseconds'}
into timedelta objects. If False, leave them encoded as numbers.
If None (default), assume the same value of decode_times.
use_cftime : bool, optional
Only relevant if encoded dates come from a standard calendar
(e.g. "gregorian", "proleptic_gregorian", "standard", or not
specified). If None (default), attempt to decode times to
``np.datetime64[ns]`` objects; if this is not possible, decode times to
``cftime.datetime`` objects. If True, always decode times to
``cftime.datetime`` objects, regardless of whether or not they can be
represented using ``np.datetime64[ns]`` objects. If False, always
decode times to ``np.datetime64[ns]`` objects; if this is not possible
raise an error.
zarr_version : int or None, optional
The desired zarr spec version to target (currently 2 or 3). The default
of None will attempt to determine the zarr version from ``store`` when
possible, otherwise defaulting to 2.
chunked_array_type: str, optional
Which chunked array type to coerce this datasets' arrays to.
Defaults to 'dask' if installed, else whatever is registered via the `ChunkManagerEntryPoint` system.
Experimental API that should not be relied upon.
from_array_kwargs: dict, optional
Additional keyword arguments passed on to the `ChunkManagerEntrypoint.from_array` method used to create
chunked arrays, via whichever chunk manager is specified through the `chunked_array_type` kwarg.
Defaults to {'manager': 'dask'}, meaning additional kwargs will be passed eventually to
:py:func:`dask.array.from_array`. Experimental API that should not be relied upon.
Returns
-------
dataset : Dataset
The newly created dataset.
See Also
--------
open_dataset
open_mfdataset
References
----------
http://zarr.readthedocs.io/
"""
from xarray.backends.api import open_dataset
if from_array_kwargs is None:
from_array_kwargs = {}
if chunks == "auto":
try:
guess_chunkmanager(
chunked_array_type
) # attempt to import that parallel backend
chunks = {}
except ValueError:
chunks = None
if kwargs:
raise TypeError(
"open_zarr() got unexpected keyword arguments " + ",".join(kwargs.keys())
)
backend_kwargs = {
"synchronizer": synchronizer,
"consolidated": consolidated,
"overwrite_encoded_chunks": overwrite_encoded_chunks,
"chunk_store": chunk_store,
"storage_options": storage_options,
"stacklevel": 4,
"zarr_version": zarr_version,
}
ds = open_dataset(
filename_or_obj=store,
group=group,
decode_cf=decode_cf,
mask_and_scale=mask_and_scale,
decode_times=decode_times,
concat_characters=concat_characters,
decode_coords=decode_coords,
engine="zarr",
chunks=chunks,
drop_variables=drop_variables,
chunked_array_type=chunked_array_type,
from_array_kwargs=from_array_kwargs,
backend_kwargs=backend_kwargs,
decode_timedelta=decode_timedelta,
use_cftime=use_cftime,
zarr_version=zarr_version,
)
return ds
| open_zarr |
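Not part of the record above: a minimal, hypothetical sketch of ``xarray.open_zarr``. The S3 store path and the ``storage_options`` values are assumptions, not taken from the record.
# Hedged sketch: lazily read a (hypothetical) consolidated Zarr store.
import xarray as xr

ds = xr.open_zarr(
    "s3://bucket/dataset.zarr",        # hypothetical remote store
    consolidated=True,                 # store was written with consolidated metadata
    storage_options={"anon": True},    # forwarded to the fsspec filesystem backend
)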
xarray | 65 | xarray/plot/dataarray_plot.py | def plot(
darray: DataArray,
*,
row: Hashable | None = None,
col: Hashable | None = None,
col_wrap: int | None = None,
ax: Axes | None = None,
hue: Hashable | None = None,
subplot_kws: dict[str, Any] | None = None,
**kwargs: Any,
) -> Any:
"""
Default plot of DataArray using :py:mod:`matplotlib:matplotlib.pyplot`.
Calls xarray plotting function based on the dimensions of
the squeezed DataArray.
=============== ===========================
Dimensions Plotting function
=============== ===========================
1 :py:func:`xarray.plot.line`
2 :py:func:`xarray.plot.pcolormesh`
Anything else :py:func:`xarray.plot.hist`
=============== ===========================
Parameters
----------
darray : DataArray
row : Hashable or None, optional
If passed, make row faceted plots on this dimension name.
col : Hashable or None, optional
If passed, make column faceted plots on this dimension name.
col_wrap : int or None, optional
Use together with ``col`` to wrap faceted plots.
ax : matplotlib axes object, optional
Axes on which to plot. By default, use the current axes.
Mutually exclusive with ``size``, ``figsize`` and facets.
hue : Hashable or None, optional
If passed, make faceted line plots with hue on this dimension name.
subplot_kws : dict, optional
Dictionary of keyword arguments for Matplotlib subplots
(see :py:meth:`matplotlib:matplotlib.figure.Figure.add_subplot`).
**kwargs : optional
Additional keyword arguments for Matplotlib.
See Also
--------
xarray.DataArray.squeeze
"""
| /usr/src/app/target_test_cases/failed_tests_plot.txt | def plot(
darray: DataArray,
*,
row: Hashable | None = None,
col: Hashable | None = None,
col_wrap: int | None = None,
ax: Axes | None = None,
hue: Hashable | None = None,
subplot_kws: dict[str, Any] | None = None,
**kwargs: Any,
) -> Any:
"""
Default plot of DataArray using :py:mod:`matplotlib:matplotlib.pyplot`.
Calls xarray plotting function based on the dimensions of
the squeezed DataArray.
=============== ===========================
Dimensions Plotting function
=============== ===========================
1 :py:func:`xarray.plot.line`
2 :py:func:`xarray.plot.pcolormesh`
Anything else :py:func:`xarray.plot.hist`
=============== ===========================
Parameters
----------
darray : DataArray
row : Hashable or None, optional
If passed, make row faceted plots on this dimension name.
col : Hashable or None, optional
If passed, make column faceted plots on this dimension name.
col_wrap : int or None, optional
Use together with ``col`` to wrap faceted plots.
ax : matplotlib axes object, optional
Axes on which to plot. By default, use the current axes.
Mutually exclusive with ``size``, ``figsize`` and facets.
hue : Hashable or None, optional
If passed, make faceted line plots with hue on this dimension name.
subplot_kws : dict, optional
Dictionary of keyword arguments for Matplotlib subplots
(see :py:meth:`matplotlib:matplotlib.figure.Figure.add_subplot`).
**kwargs : optional
Additional keyword arguments for Matplotlib.
See Also
--------
xarray.DataArray.squeeze
"""
darray = darray.squeeze(
d for d, s in darray.sizes.items() if s == 1 and d not in (row, col, hue)
).compute()
plot_dims = set(darray.dims)
plot_dims.discard(row)
plot_dims.discard(col)
plot_dims.discard(hue)
ndims = len(plot_dims)
plotfunc: Callable
if ndims == 0 or darray.size == 0:
raise TypeError("No numeric data to plot.")
if ndims in (1, 2):
if row or col:
kwargs["subplot_kws"] = subplot_kws
kwargs["row"] = row
kwargs["col"] = col
kwargs["col_wrap"] = col_wrap
if ndims == 1:
plotfunc = line
kwargs["hue"] = hue
elif ndims == 2:
if hue:
plotfunc = line
kwargs["hue"] = hue
else:
plotfunc = pcolormesh
kwargs["subplot_kws"] = subplot_kws
else:
if row or col or hue:
raise ValueError(
"Only 1d and 2d plots are supported for facets in xarray. "
"See the package `Seaborn` for more options."
)
plotfunc = hist
kwargs["ax"] = ax
return plotfunc(darray, **kwargs)
| plot |
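Not part of the record above: a tiny, hypothetical sketch of how the default ``.plot()`` dispatch described in the record behaves for 1-D and 2-D data. The synthetic arrays are made up for illustration.
# Hedged sketch: .plot() dispatches on the number of remaining dimensions.
import matplotlib.pyplot as plt
import numpy as np
import xarray as xr

da1d = xr.DataArray(np.sin(np.linspace(0, 6, 50)), dims="x")
da2d = xr.DataArray(np.random.rand(20, 30), dims=("y", "x"))

da1d.plot()   # 1 remaining dim  -> xarray.plot.line
da2d.plot()   # 2 remaining dims -> xarray.plot.pcolormesh
plt.show()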
xarray | 66 | xarray/core/_aggregations.py | def prod(
self,
dim: Dims = None,
*,
skipna: bool | None = None,
min_count: int | None = None,
keep_attrs: bool | None = None,
**kwargs: Any,
) -> Self:
"""
Reduce this Dataset's data by applying ``prod`` along some dimension(s).
Parameters
----------
dim : str, Iterable of Hashable, "..." or None, default: None
Name of dimension[s] along which to apply ``prod``. For e.g. ``dim="x"``
or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions.
skipna : bool or None, optional
If True, skip missing values (as marked by NaN). By default, only
skips missing values for float dtypes; other dtypes either do not
have a sentinel missing value (int) or ``skipna=True`` has not been
implemented (object, datetime64 or timedelta64).
min_count : int or None, optional
The required number of valid values to perform the operation. If
fewer than min_count non-NA values are present the result will be
NA. Only used if skipna is set to True or defaults to True for the
array's dtype. Changed in version 0.17.0: if specified on an integer
array and skipna=True, the result will be a float array.
keep_attrs : bool or None, optional
If True, ``attrs`` will be copied from the original
object to the new one. If False, the new object will be
returned without attributes.
**kwargs : Any
Additional keyword arguments passed on to the appropriate array
function for calculating ``prod`` on this object's data.
These could include dask-specific kwargs like ``split_every``.
Returns
-------
reduced : Dataset
New Dataset with ``prod`` applied to its data and the
indicated dimension(s) removed
See Also
--------
numpy.prod
dask.array.prod
DataArray.prod
:ref:`agg`
User guide on reduction or aggregation operations.
Notes
-----
Non-numeric variables will be removed prior to reducing.
Examples
--------
>>> da = xr.DataArray(
... np.array([1, 2, 3, 0, 2, np.nan]),
... dims="time",
... coords=dict(
... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)),
... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
... ),
... )
>>> ds = xr.Dataset(dict(da=da))
>>> ds
<xarray.Dataset> Size: 120B
Dimensions: (time: 6)
Coordinates:
* time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30
labels (time) <U1 24B 'a' 'b' 'c' 'c' 'b' 'a'
Data variables:
da (time) float64 48B 1.0 2.0 3.0 0.0 2.0 nan
>>> ds.prod()
<xarray.Dataset> Size: 8B
Dimensions: ()
Data variables:
da float64 8B 0.0
Use ``skipna`` to control whether NaNs are ignored.
>>> ds.prod(skipna=False)
<xarray.Dataset> Size: 8B
Dimensions: ()
Data variables:
da float64 8B nan
Specify ``min_count`` for finer control over when NaNs are ignored.
>>> ds.prod(skipna=True, min_count=2)
<xarray.Dataset> Size: 8B
Dimensions: ()
Data variables:
da float64 8B 0.0
"""
| /usr/src/app/target_test_cases/failed_tests_prod.txt | def prod(
self,
dim: Dims = None,
*,
skipna: bool | None = None,
min_count: int | None = None,
keep_attrs: bool | None = None,
**kwargs: Any,
) -> Self:
"""
Reduce this Dataset's data by applying ``prod`` along some dimension(s).
Parameters
----------
dim : str, Iterable of Hashable, "..." or None, default: None
Name of dimension[s] along which to apply ``prod``. For e.g. ``dim="x"``
or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions.
skipna : bool or None, optional
If True, skip missing values (as marked by NaN). By default, only
skips missing values for float dtypes; other dtypes either do not
have a sentinel missing value (int) or ``skipna=True`` has not been
implemented (object, datetime64 or timedelta64).
min_count : int or None, optional
The required number of valid values to perform the operation. If
fewer than min_count non-NA values are present the result will be
NA. Only used if skipna is set to True or defaults to True for the
array's dtype. Changed in version 0.17.0: if specified on an integer
array and skipna=True, the result will be a float array.
keep_attrs : bool or None, optional
If True, ``attrs`` will be copied from the original
object to the new one. If False, the new object will be
returned without attributes.
**kwargs : Any
Additional keyword arguments passed on to the appropriate array
function for calculating ``prod`` on this object's data.
These could include dask-specific kwargs like ``split_every``.
Returns
-------
reduced : Dataset
New Dataset with ``prod`` applied to its data and the
indicated dimension(s) removed
See Also
--------
numpy.prod
dask.array.prod
DataArray.prod
:ref:`agg`
User guide on reduction or aggregation operations.
Notes
-----
Non-numeric variables will be removed prior to reducing.
Examples
--------
>>> da = xr.DataArray(
... np.array([1, 2, 3, 0, 2, np.nan]),
... dims="time",
... coords=dict(
... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)),
... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
... ),
... )
>>> ds = xr.Dataset(dict(da=da))
>>> ds
<xarray.Dataset> Size: 120B
Dimensions: (time: 6)
Coordinates:
* time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30
labels (time) <U1 24B 'a' 'b' 'c' 'c' 'b' 'a'
Data variables:
da (time) float64 48B 1.0 2.0 3.0 0.0 2.0 nan
>>> ds.prod()
<xarray.Dataset> Size: 8B
Dimensions: ()
Data variables:
da float64 8B 0.0
Use ``skipna`` to control whether NaNs are ignored.
>>> ds.prod(skipna=False)
<xarray.Dataset> Size: 8B
Dimensions: ()
Data variables:
da float64 8B nan
Specify ``min_count`` for finer control over when NaNs are ignored.
>>> ds.prod(skipna=True, min_count=2)
<xarray.Dataset> Size: 8B
Dimensions: ()
Data variables:
da float64 8B 0.0
"""
return self.reduce(
duck_array_ops.prod,
dim=dim,
skipna=skipna,
min_count=min_count,
numeric_only=True,
keep_attrs=keep_attrs,
**kwargs,
)
| prod |
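Not part of the record above: a minimal sketch of ``Dataset.prod`` with ``skipna`` and ``min_count`` on a tiny synthetic Dataset, mirroring the examples in the record's docstring.
# Hedged sketch: product reduction with NaN handling.
import numpy as np
import xarray as xr

ds = xr.Dataset({"da": ("time", [1.0, 2.0, np.nan])})
ds.prod()                          # NaN skipped for float dtypes -> 2.0
ds.prod(skipna=False)              # NaN propagates              -> nan
ds.prod(skipna=True, min_count=3)  # only 2 valid values present -> nan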
xarray | 67 | xarray/core/groupby.py | def quantile(
self,
q: ArrayLike,
dim: Dims = None,
*,
method: QuantileMethods = "linear",
keep_attrs: bool | None = None,
skipna: bool | None = None,
interpolation: QuantileMethods | None = None,
) -> T_Xarray:
"""Compute the qth quantile over each array in the groups and
concatenate them together into a new array.
Parameters
----------
q : float or sequence of float
Quantile to compute, which must be between 0 and 1
inclusive.
dim : str or Iterable of Hashable, optional
Dimension(s) over which to apply quantile.
Defaults to the grouped dimension.
method : str, default: "linear"
This optional parameter specifies the interpolation method to use when the
desired quantile lies between two data points. The options sorted by their R
type as summarized in the H&F paper [1]_ are:
1. "inverted_cdf"
2. "averaged_inverted_cdf"
3. "closest_observation"
4. "interpolated_inverted_cdf"
5. "hazen"
6. "weibull"
7. "linear" (default)
8. "median_unbiased"
9. "normal_unbiased"
The first three methods are discontinuous. The following discontinuous
variations of the default "linear" (7.) option are also available:
* "lower"
* "higher"
* "midpoint"
* "nearest"
See :py:func:`numpy.quantile` or [1]_ for details. The "method" argument
was previously called "interpolation", renamed in accordance with numpy
version 1.22.0.
keep_attrs : bool or None, default: None
If True, the dataarray's attributes (`attrs`) will be copied from
the original object to the new one. If False, the new
object will be returned without attributes.
skipna : bool or None, default: None
If True, skip missing values (as marked by NaN). By default, only
skips missing values for float dtypes; other dtypes either do not
have a sentinel missing value (int) or skipna=True has not been
implemented (object, datetime64 or timedelta64).
Returns
-------
quantiles : Variable
If `q` is a single quantile, then the result is a
scalar. If multiple percentiles are given, first axis of
the result corresponds to the quantile. In either case a
quantile dimension is added to the return array. The other
dimensions are the dimensions that remain after the
reduction of the array.
See Also
--------
numpy.nanquantile, numpy.quantile, pandas.Series.quantile, Dataset.quantile
DataArray.quantile
Examples
--------
>>> da = xr.DataArray(
... [[1.3, 8.4, 0.7, 6.9], [0.7, 4.2, 9.4, 1.5], [6.5, 7.3, 2.6, 1.9]],
... coords={"x": [0, 0, 1], "y": [1, 1, 2, 2]},
... dims=("x", "y"),
... )
>>> ds = xr.Dataset({"a": da})
>>> da.groupby("x").quantile(0)
<xarray.DataArray (x: 2, y: 4)> Size: 64B
array([[0.7, 4.2, 0.7, 1.5],
[6.5, 7.3, 2.6, 1.9]])
Coordinates:
* y (y) int64 32B 1 1 2 2
quantile float64 8B 0.0
* x (x) int64 16B 0 1
>>> ds.groupby("y").quantile(0, dim=...)
<xarray.Dataset> Size: 40B
Dimensions: (y: 2)
Coordinates:
quantile float64 8B 0.0
* y (y) int64 16B 1 2
Data variables:
a (y) float64 16B 0.7 0.7
>>> da.groupby("x").quantile([0, 0.5, 1])
<xarray.DataArray (x: 2, y: 4, quantile: 3)> Size: 192B
array([[[0.7 , 1. , 1.3 ],
[4.2 , 6.3 , 8.4 ],
[0.7 , 5.05, 9.4 ],
[1.5 , 4.2 , 6.9 ]],
<BLANKLINE>
[[6.5 , 6.5 , 6.5 ],
[7.3 , 7.3 , 7.3 ],
[2.6 , 2.6 , 2.6 ],
[1.9 , 1.9 , 1.9 ]]])
Coordinates:
* y (y) int64 32B 1 1 2 2
* quantile (quantile) float64 24B 0.0 0.5 1.0
* x (x) int64 16B 0 1
>>> ds.groupby("y").quantile([0, 0.5, 1], dim=...)
<xarray.Dataset> Size: 88B
Dimensions: (y: 2, quantile: 3)
Coordinates:
* quantile (quantile) float64 24B 0.0 0.5 1.0
* y (y) int64 16B 1 2
Data variables:
a (y, quantile) float64 48B 0.7 5.35 8.4 0.7 2.25 9.4
References
----------
.. [1] R. J. Hyndman and Y. Fan,
"Sample quantiles in statistical packages,"
The American Statistician, 50(4), pp. 361-365, 1996
"""
| /usr/src/app/target_test_cases/failed_tests_quantile.txt | def quantile(
self,
q: ArrayLike,
dim: Dims = None,
*,
method: QuantileMethods = "linear",
keep_attrs: bool | None = None,
skipna: bool | None = None,
interpolation: QuantileMethods | None = None,
) -> T_Xarray:
"""Compute the qth quantile over each array in the groups and
concatenate them together into a new array.
Parameters
----------
q : float or sequence of float
Quantile to compute, which must be between 0 and 1
inclusive.
dim : str or Iterable of Hashable, optional
Dimension(s) over which to apply quantile.
Defaults to the grouped dimension.
method : str, default: "linear"
This optional parameter specifies the interpolation method to use when the
desired quantile lies between two data points. The options sorted by their R
type as summarized in the H&F paper [1]_ are:
1. "inverted_cdf"
2. "averaged_inverted_cdf"
3. "closest_observation"
4. "interpolated_inverted_cdf"
5. "hazen"
6. "weibull"
7. "linear" (default)
8. "median_unbiased"
9. "normal_unbiased"
The first three methods are discontinuous. The following discontinuous
variations of the default "linear" (7.) option are also available:
* "lower"
* "higher"
* "midpoint"
* "nearest"
See :py:func:`numpy.quantile` or [1]_ for details. The "method" argument
was previously called "interpolation", renamed in accordance with numpy
version 1.22.0.
keep_attrs : bool or None, default: None
If True, the dataarray's attributes (`attrs`) will be copied from
the original object to the new one. If False, the new
object will be returned without attributes.
skipna : bool or None, default: None
If True, skip missing values (as marked by NaN). By default, only
skips missing values for float dtypes; other dtypes either do not
have a sentinel missing value (int) or skipna=True has not been
implemented (object, datetime64 or timedelta64).
Returns
-------
quantiles : Variable
If `q` is a single quantile, then the result is a
scalar. If multiple percentiles are given, first axis of
the result corresponds to the quantile. In either case a
quantile dimension is added to the return array. The other
dimensions are the dimensions that remain after the
reduction of the array.
See Also
--------
numpy.nanquantile, numpy.quantile, pandas.Series.quantile, Dataset.quantile
DataArray.quantile
Examples
--------
>>> da = xr.DataArray(
... [[1.3, 8.4, 0.7, 6.9], [0.7, 4.2, 9.4, 1.5], [6.5, 7.3, 2.6, 1.9]],
... coords={"x": [0, 0, 1], "y": [1, 1, 2, 2]},
... dims=("x", "y"),
... )
>>> ds = xr.Dataset({"a": da})
>>> da.groupby("x").quantile(0)
<xarray.DataArray (x: 2, y: 4)> Size: 64B
array([[0.7, 4.2, 0.7, 1.5],
[6.5, 7.3, 2.6, 1.9]])
Coordinates:
* y (y) int64 32B 1 1 2 2
quantile float64 8B 0.0
* x (x) int64 16B 0 1
>>> ds.groupby("y").quantile(0, dim=...)
<xarray.Dataset> Size: 40B
Dimensions: (y: 2)
Coordinates:
quantile float64 8B 0.0
* y (y) int64 16B 1 2
Data variables:
a (y) float64 16B 0.7 0.7
>>> da.groupby("x").quantile([0, 0.5, 1])
<xarray.DataArray (x: 2, y: 4, quantile: 3)> Size: 192B
array([[[0.7 , 1. , 1.3 ],
[4.2 , 6.3 , 8.4 ],
[0.7 , 5.05, 9.4 ],
[1.5 , 4.2 , 6.9 ]],
<BLANKLINE>
[[6.5 , 6.5 , 6.5 ],
[7.3 , 7.3 , 7.3 ],
[2.6 , 2.6 , 2.6 ],
[1.9 , 1.9 , 1.9 ]]])
Coordinates:
* y (y) int64 32B 1 1 2 2
* quantile (quantile) float64 24B 0.0 0.5 1.0
* x (x) int64 16B 0 1
>>> ds.groupby("y").quantile([0, 0.5, 1], dim=...)
<xarray.Dataset> Size: 88B
Dimensions: (y: 2, quantile: 3)
Coordinates:
* quantile (quantile) float64 24B 0.0 0.5 1.0
* y (y) int64 16B 1 2
Data variables:
a (y, quantile) float64 48B 0.7 5.35 8.4 0.7 2.25 9.4
References
----------
.. [1] R. J. Hyndman and Y. Fan,
"Sample quantiles in statistical packages,"
The American Statistician, 50(4), pp. 361-365, 1996
"""
if dim is None:
dim = (self._group_dim,)
# Dataset.quantile does this, do it for flox to ensure same output.
q = np.asarray(q, dtype=np.float64)
if (
method == "linear"
and OPTIONS["use_flox"]
and contains_only_chunked_or_numpy(self._obj)
and module_available("flox", minversion="0.9.4")
):
result = self._flox_reduce(
func="quantile", q=q, dim=dim, keep_attrs=keep_attrs, skipna=skipna
)
return result
else:
return self.map(
self._obj.__class__.quantile,
shortcut=False,
q=q,
dim=dim,
method=method,
keep_attrs=keep_attrs,
skipna=skipna,
interpolation=interpolation,
)
| quantile |
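A minimal sketch of the dispatch in the body above: the flox fast path is taken only for method="linear" with the use_flox option enabled and flox >= 0.9.4 importable; otherwise the call falls back to mapping Dataset.quantile over the groups. The snippet below forces that fallback; it reuses the `ds` built in the docstring example and assumes xarray is imported as xr (output omitted).
>>> with xr.set_options(use_flox=False):
...     ds.groupby("y").quantile(0.5, dim=...)  # always takes the map() branch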
xarray | 68 | xarray/core/rolling.py | def reduce(
self, func: Callable, keep_attrs: bool | None = None, **kwargs: Any
) -> DataArray:
"""Reduce the items in this group by applying `func` along some
dimension(s).
Parameters
----------
func : callable
Function which can be called in the form
`func(x, **kwargs)` to return the result of collapsing an
np.ndarray over the rolling dimension.
keep_attrs : bool, default: None
If True, the attributes (``attrs``) will be copied from the original
object to the new one. If False, the new object will be returned
without attributes. If None uses the global default.
**kwargs : dict
Additional keyword arguments passed on to `func`.
Returns
-------
reduced : DataArray
Array with summarized data.
Examples
--------
>>> da = xr.DataArray(np.arange(8).reshape(2, 4), dims=("a", "b"))
>>> rolling = da.rolling(b=3)
>>> rolling.construct("window_dim")
<xarray.DataArray (a: 2, b: 4, window_dim: 3)> Size: 192B
array([[[nan, nan, 0.],
[nan, 0., 1.],
[ 0., 1., 2.],
[ 1., 2., 3.]],
<BLANKLINE>
[[nan, nan, 4.],
[nan, 4., 5.],
[ 4., 5., 6.],
[ 5., 6., 7.]]])
Dimensions without coordinates: a, b, window_dim
>>> rolling.reduce(np.sum)
<xarray.DataArray (a: 2, b: 4)> Size: 64B
array([[nan, nan, 3., 6.],
[nan, nan, 15., 18.]])
Dimensions without coordinates: a, b
>>> rolling = da.rolling(b=3, min_periods=1)
>>> rolling.reduce(np.nansum)
<xarray.DataArray (a: 2, b: 4)> Size: 64B
array([[ 0., 1., 3., 6.],
[ 4., 9., 15., 18.]])
Dimensions without coordinates: a, b
"""
| /usr/src/app/target_test_cases/failed_tests_reduce.txt | def reduce(
self, func: Callable, keep_attrs: bool | None = None, **kwargs: Any
) -> DataArray:
"""Reduce the items in this group by applying `func` along some
dimension(s).
Parameters
----------
func : callable
Function which can be called in the form
`func(x, **kwargs)` to return the result of collapsing an
np.ndarray over the rolling dimension.
keep_attrs : bool, default: None
If True, the attributes (``attrs``) will be copied from the original
object to the new one. If False, the new object will be returned
without attributes. If None uses the global default.
**kwargs : dict
Additional keyword arguments passed on to `func`.
Returns
-------
reduced : DataArray
Array with summarized data.
Examples
--------
>>> da = xr.DataArray(np.arange(8).reshape(2, 4), dims=("a", "b"))
>>> rolling = da.rolling(b=3)
>>> rolling.construct("window_dim")
<xarray.DataArray (a: 2, b: 4, window_dim: 3)> Size: 192B
array([[[nan, nan, 0.],
[nan, 0., 1.],
[ 0., 1., 2.],
[ 1., 2., 3.]],
<BLANKLINE>
[[nan, nan, 4.],
[nan, 4., 5.],
[ 4., 5., 6.],
[ 5., 6., 7.]]])
Dimensions without coordinates: a, b, window_dim
>>> rolling.reduce(np.sum)
<xarray.DataArray (a: 2, b: 4)> Size: 64B
array([[nan, nan, 3., 6.],
[nan, nan, 15., 18.]])
Dimensions without coordinates: a, b
>>> rolling = da.rolling(b=3, min_periods=1)
>>> rolling.reduce(np.nansum)
<xarray.DataArray (a: 2, b: 4)> Size: 64B
array([[ 0., 1., 3., 6.],
[ 4., 9., 15., 18.]])
Dimensions without coordinates: a, b
"""
keep_attrs = self._get_keep_attrs(keep_attrs)
rolling_dim = {
d: utils.get_temp_dimname(self.obj.dims, f"_rolling_dim_{d}")
for d in self.dim
}
# save memory with reductions GH4325
fillna = kwargs.pop("fillna", dtypes.NA)
if fillna is not dtypes.NA:
obj = self.obj.fillna(fillna)
else:
obj = self.obj
windows = self._construct(
obj, rolling_dim, keep_attrs=keep_attrs, fill_value=fillna
)
dim = list(rolling_dim.values())
result = windows.reduce(func, dim=dim, keep_attrs=keep_attrs, **kwargs)
# Find valid windows based on count.
counts = self._counts(keep_attrs=False)
return result.where(counts >= self.min_periods)
| reduce |
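A short, illustrative sketch of passing a different reduction callable to the rolling reduce above; any callable of the form ``func(x, axis=..., **kwargs)`` over the constructed window dimension should work. The `da` matches the docstring example; output omitted.
>>> import numpy as np
>>> import xarray as xr
>>> da = xr.DataArray(np.arange(8).reshape(2, 4), dims=("a", "b"))
>>> da.rolling(b=3, min_periods=1).reduce(np.nanmax)  # per-window maximum, ignoring NaN padding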
xarray | 69 | xarray/core/dataset.py | def reindex_like(
self,
other: T_Xarray,
method: ReindexMethodOptions = None,
tolerance: float | Iterable[float] | str | None = None,
copy: bool = True,
fill_value: Any = xrdtypes.NA,
) -> Self:
"""
Conform this object onto the indexes of another object, for indexes which the
objects share. Missing values are filled with ``fill_value``. The default fill
value is NaN.
Parameters
----------
other : Dataset or DataArray
Object with an 'indexes' attribute giving a mapping from dimension
names to pandas.Index objects, which provides coordinates upon
which to index the variables in this dataset. The indexes on this
other object need not be the same as the indexes on this
dataset. Any mis-matched index values will be filled in with
NaN, and any mis-matched dimension names will simply be ignored.
method : {None, "nearest", "pad", "ffill", "backfill", "bfill"}, optional
Method to use for filling index values from other not found in this
dataset:
- None (default): don't fill gaps
- "pad" / "ffill": propagate last valid index value forward
- "backfill" / "bfill": propagate next valid index value backward
- "nearest": use nearest valid index value
tolerance : float | Iterable[float] | str | None, default: None
Maximum distance between original and new labels for inexact
matches. The values of the index at the matching locations must
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
Tolerance may be a scalar value, which applies the same tolerance
to all values, or list-like, which applies variable tolerance per
element. List-like must be the same size as the index and its dtype
must exactly match the index’s type.
copy : bool, default: True
If ``copy=True``, data in the return value is always copied. If
``copy=False`` and reindexing is unnecessary, or can be performed
with only slice operations, then the output may share memory with
the input. In either case, a new xarray object is always returned.
fill_value : scalar or dict-like, optional
Value to use for newly missing values. If a dict-like maps
variable names to fill values.
Returns
-------
reindexed : Dataset
Another dataset, with this dataset's data but coordinates from the
other object.
See Also
--------
Dataset.reindex
DataArray.reindex_like
align
"""
| /usr/src/app/target_test_cases/failed_tests_reindex_like.txt | def reindex_like(
self,
other: T_Xarray,
method: ReindexMethodOptions = None,
tolerance: float | Iterable[float] | str | None = None,
copy: bool = True,
fill_value: Any = xrdtypes.NA,
) -> Self:
"""
Conform this object onto the indexes of another object, for indexes which the
objects share. Missing values are filled with ``fill_value``. The default fill
value is NaN.
Parameters
----------
other : Dataset or DataArray
Object with an 'indexes' attribute giving a mapping from dimension
names to pandas.Index objects, which provides coordinates upon
which to index the variables in this dataset. The indexes on this
other object need not be the same as the indexes on this
dataset. Any mis-matched index values will be filled in with
NaN, and any mis-matched dimension names will simply be ignored.
method : {None, "nearest", "pad", "ffill", "backfill", "bfill"}, optional
Method to use for filling index values from other not found in this
dataset:
- None (default): don't fill gaps
- "pad" / "ffill": propagate last valid index value forward
- "backfill" / "bfill": propagate next valid index value backward
- "nearest": use nearest valid index value
tolerance : float | Iterable[float] | str | None, default: None
Maximum distance between original and new labels for inexact
matches. The values of the index at the matching locations must
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
Tolerance may be a scalar value, which applies the same tolerance
to all values, or list-like, which applies variable tolerance per
element. List-like must be the same size as the index and its dtype
must exactly match the index’s type.
copy : bool, default: True
If ``copy=True``, data in the return value is always copied. If
``copy=False`` and reindexing is unnecessary, or can be performed
with only slice operations, then the output may share memory with
the input. In either case, a new xarray object is always returned.
fill_value : scalar or dict-like, optional
Value to use for newly missing values. If a dict-like maps
variable names to fill values.
Returns
-------
reindexed : Dataset
Another dataset, with this dataset's data but coordinates from the
other object.
See Also
--------
Dataset.reindex
DataArray.reindex_like
align
"""
return alignment.reindex_like(
self,
other=other,
method=method,
tolerance=tolerance,
copy=copy,
fill_value=fill_value,
)
| reindex_like |
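The reindex_like docstring above ships without an inline example; the sketch below is illustrative only (the variable names are invented here, not taken from the row) and assumes xarray is imported as xr. Output omitted.
>>> import xarray as xr
>>> ds = xr.Dataset({"t": ("x", [10.0, 11.0, 12.0])}, coords={"x": [0, 1, 2]})
>>> other = xr.Dataset(coords={"x": [1, 2, 3]})
>>> ds.reindex_like(other)  # "t" is filled with NaN at x=3; x=0 is dropped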
xarray | 70 | xarray/core/dataset.py | def reset_coords(
self,
names: Dims = None,
drop: bool = False,
) -> Self:
"""Given names of coordinates, reset them to become variables
Parameters
----------
names : str, Iterable of Hashable or None, optional
Name(s) of non-index coordinates in this dataset to reset into
variables. By default, all non-index coordinates are reset.
drop : bool, default: False
If True, remove coordinates instead of converting them into
variables.
Examples
--------
>>> dataset = xr.Dataset(
... {
... "temperature": (
... ["time", "lat", "lon"],
... [[[25, 26], [27, 28]], [[29, 30], [31, 32]]],
... ),
... "precipitation": (
... ["time", "lat", "lon"],
... [[[0.5, 0.8], [0.2, 0.4]], [[0.3, 0.6], [0.7, 0.9]]],
... ),
... },
... coords={
... "time": pd.date_range(start="2023-01-01", periods=2),
... "lat": [40, 41],
... "lon": [-80, -79],
... "altitude": 1000,
... },
... )
# Dataset before resetting coordinates
>>> dataset
<xarray.Dataset> Size: 184B
Dimensions: (time: 2, lat: 2, lon: 2)
Coordinates:
* time (time) datetime64[ns] 16B 2023-01-01 2023-01-02
* lat (lat) int64 16B 40 41
* lon (lon) int64 16B -80 -79
altitude int64 8B 1000
Data variables:
temperature (time, lat, lon) int64 64B 25 26 27 28 29 30 31 32
precipitation (time, lat, lon) float64 64B 0.5 0.8 0.2 0.4 0.3 0.6 0.7 0.9
# Reset the 'altitude' coordinate
>>> dataset_reset = dataset.reset_coords("altitude")
# Dataset after resetting coordinates
>>> dataset_reset
<xarray.Dataset> Size: 184B
Dimensions: (time: 2, lat: 2, lon: 2)
Coordinates:
* time (time) datetime64[ns] 16B 2023-01-01 2023-01-02
* lat (lat) int64 16B 40 41
* lon (lon) int64 16B -80 -79
Data variables:
temperature (time, lat, lon) int64 64B 25 26 27 28 29 30 31 32
precipitation (time, lat, lon) float64 64B 0.5 0.8 0.2 0.4 0.3 0.6 0.7 0.9
altitude int64 8B 1000
Returns
-------
Dataset
See Also
--------
Dataset.set_coords
"""
| /usr/src/app/target_test_cases/failed_tests_reset_coords.txt | def reset_coords(
self,
names: Dims = None,
drop: bool = False,
) -> Self:
"""Given names of coordinates, reset them to become variables
Parameters
----------
names : str, Iterable of Hashable or None, optional
Name(s) of non-index coordinates in this dataset to reset into
variables. By default, all non-index coordinates are reset.
drop : bool, default: False
If True, remove coordinates instead of converting them into
variables.
Examples
--------
>>> dataset = xr.Dataset(
... {
... "temperature": (
... ["time", "lat", "lon"],
... [[[25, 26], [27, 28]], [[29, 30], [31, 32]]],
... ),
... "precipitation": (
... ["time", "lat", "lon"],
... [[[0.5, 0.8], [0.2, 0.4]], [[0.3, 0.6], [0.7, 0.9]]],
... ),
... },
... coords={
... "time": pd.date_range(start="2023-01-01", periods=2),
... "lat": [40, 41],
... "lon": [-80, -79],
... "altitude": 1000,
... },
... )
# Dataset before resetting coordinates
>>> dataset
<xarray.Dataset> Size: 184B
Dimensions: (time: 2, lat: 2, lon: 2)
Coordinates:
* time (time) datetime64[ns] 16B 2023-01-01 2023-01-02
* lat (lat) int64 16B 40 41
* lon (lon) int64 16B -80 -79
altitude int64 8B 1000
Data variables:
temperature (time, lat, lon) int64 64B 25 26 27 28 29 30 31 32
precipitation (time, lat, lon) float64 64B 0.5 0.8 0.2 0.4 0.3 0.6 0.7 0.9
# Reset the 'altitude' coordinate
>>> dataset_reset = dataset.reset_coords("altitude")
# Dataset after resetting coordinates
>>> dataset_reset
<xarray.Dataset> Size: 184B
Dimensions: (time: 2, lat: 2, lon: 2)
Coordinates:
* time (time) datetime64[ns] 16B 2023-01-01 2023-01-02
* lat (lat) int64 16B 40 41
* lon (lon) int64 16B -80 -79
Data variables:
temperature (time, lat, lon) int64 64B 25 26 27 28 29 30 31 32
precipitation (time, lat, lon) float64 64B 0.5 0.8 0.2 0.4 0.3 0.6 0.7 0.9
altitude int64 8B 1000
Returns
-------
Dataset
See Also
--------
Dataset.set_coords
"""
if names is None:
names = self._coord_names - set(self._indexes)
else:
if isinstance(names, str) or not isinstance(names, Iterable):
names = [names]
else:
names = list(names)
self._assert_all_in_dataset(names)
bad_coords = set(names) & set(self._indexes)
if bad_coords:
raise ValueError(
f"cannot remove index coordinates with reset_coords: {bad_coords}"
)
obj = self.copy()
obj._coord_names.difference_update(names)
if drop:
for name in names:
del obj._variables[name]
return obj
| reset_coords |
xarray | 71 | xarray/backends/api.py | def save_mfdataset(
datasets,
paths,
mode="w",
format=None,
groups=None,
engine=None,
compute=True,
**kwargs,
):
"""Write multiple datasets to disk as netCDF files simultaneously.
This function is intended for use with datasets consisting of dask.array
objects, in which case it can write the multiple datasets to disk
simultaneously using a shared thread pool.
When not using dask, it is no different than calling ``to_netcdf``
repeatedly.
Parameters
----------
datasets : list of Dataset
List of datasets to save.
paths : list of str or list of path-like objects
List of paths to which to save each corresponding dataset.
mode : {"w", "a"}, optional
Write ("w") or append ("a") mode. If mode="w", any existing file at
these locations will be overwritten.
format : {"NETCDF4", "NETCDF4_CLASSIC", "NETCDF3_64BIT", \
"NETCDF3_CLASSIC"}, optional
File format for the resulting netCDF file:
* NETCDF4: Data is stored in an HDF5 file, using netCDF4 API
features.
* NETCDF4_CLASSIC: Data is stored in an HDF5 file, using only
netCDF 3 compatible API features.
* NETCDF3_64BIT: 64-bit offset version of the netCDF 3 file format,
which fully supports 2+ GB files, but is only compatible with
clients linked against netCDF version 3.6.0 or later.
* NETCDF3_CLASSIC: The classic netCDF 3 file format. It does not
handle 2+ GB files very well.
All formats are supported by the netCDF4-python library.
scipy.io.netcdf only supports the last two formats.
The default format is NETCDF4 if you are saving a file to disk and
have the netCDF4-python library available. Otherwise, xarray falls
back to using scipy to write netCDF files and defaults to the
NETCDF3_64BIT format (scipy does not support netCDF4).
groups : list of str, optional
Paths to the netCDF4 group in each corresponding file to which to save
datasets (only works for format="NETCDF4"). The groups will be created
if necessary.
engine : {"netcdf4", "scipy", "h5netcdf"}, optional
Engine to use when writing netCDF files. If not provided, the
default engine is chosen based on available dependencies, with a
preference for "netcdf4" if writing to a file on disk.
See `Dataset.to_netcdf` for additional information.
compute : bool
If true compute immediately, otherwise return a
``dask.delayed.Delayed`` object that can be computed later.
**kwargs : dict, optional
Additional arguments are passed along to ``to_netcdf``.
Examples
--------
Save a dataset into one netCDF per year of data:
>>> ds = xr.Dataset(
... {"a": ("time", np.linspace(0, 1, 48))},
... coords={"time": pd.date_range("2010-01-01", freq="ME", periods=48)},
... )
>>> ds
<xarray.Dataset> Size: 768B
Dimensions: (time: 48)
Coordinates:
* time (time) datetime64[ns] 384B 2010-01-31 2010-02-28 ... 2013-12-31
Data variables:
a (time) float64 384B 0.0 0.02128 0.04255 ... 0.9574 0.9787 1.0
>>> years, datasets = zip(*ds.groupby("time.year"))
>>> paths = [f"{y}.nc" for y in years]
>>> xr.save_mfdataset(datasets, paths)
"""
| /usr/src/app/target_test_cases/failed_tests_save_mfdataset.txt | def save_mfdataset(
datasets,
paths,
mode="w",
format=None,
groups=None,
engine=None,
compute=True,
**kwargs,
):
"""Write multiple datasets to disk as netCDF files simultaneously.
This function is intended for use with datasets consisting of dask.array
objects, in which case it can write the multiple datasets to disk
simultaneously using a shared thread pool.
When not using dask, it is no different than calling ``to_netcdf``
repeatedly.
Parameters
----------
datasets : list of Dataset
List of datasets to save.
paths : list of str or list of path-like objects
List of paths to which to save each corresponding dataset.
mode : {"w", "a"}, optional
Write ("w") or append ("a") mode. If mode="w", any existing file at
these locations will be overwritten.
format : {"NETCDF4", "NETCDF4_CLASSIC", "NETCDF3_64BIT", \
"NETCDF3_CLASSIC"}, optional
File format for the resulting netCDF file:
* NETCDF4: Data is stored in an HDF5 file, using netCDF4 API
features.
* NETCDF4_CLASSIC: Data is stored in an HDF5 file, using only
netCDF 3 compatible API features.
* NETCDF3_64BIT: 64-bit offset version of the netCDF 3 file format,
which fully supports 2+ GB files, but is only compatible with
clients linked against netCDF version 3.6.0 or later.
* NETCDF3_CLASSIC: The classic netCDF 3 file format. It does not
handle 2+ GB files very well.
All formats are supported by the netCDF4-python library.
scipy.io.netcdf only supports the last two formats.
The default format is NETCDF4 if you are saving a file to disk and
have the netCDF4-python library available. Otherwise, xarray falls
back to using scipy to write netCDF files and defaults to the
NETCDF3_64BIT format (scipy does not support netCDF4).
groups : list of str, optional
Paths to the netCDF4 group in each corresponding file to which to save
datasets (only works for format="NETCDF4"). The groups will be created
if necessary.
engine : {"netcdf4", "scipy", "h5netcdf"}, optional
Engine to use when writing netCDF files. If not provided, the
default engine is chosen based on available dependencies, with a
preference for "netcdf4" if writing to a file on disk.
See `Dataset.to_netcdf` for additional information.
compute : bool
If true compute immediately, otherwise return a
``dask.delayed.Delayed`` object that can be computed later.
**kwargs : dict, optional
Additional arguments are passed along to ``to_netcdf``.
Examples
--------
Save a dataset into one netCDF per year of data:
>>> ds = xr.Dataset(
... {"a": ("time", np.linspace(0, 1, 48))},
... coords={"time": pd.date_range("2010-01-01", freq="ME", periods=48)},
... )
>>> ds
<xarray.Dataset> Size: 768B
Dimensions: (time: 48)
Coordinates:
* time (time) datetime64[ns] 384B 2010-01-31 2010-02-28 ... 2013-12-31
Data variables:
a (time) float64 384B 0.0 0.02128 0.04255 ... 0.9574 0.9787 1.0
>>> years, datasets = zip(*ds.groupby("time.year"))
>>> paths = [f"{y}.nc" for y in years]
>>> xr.save_mfdataset(datasets, paths)
"""
if mode == "w" and len(set(paths)) < len(paths):
raise ValueError(
"cannot use mode='w' when writing multiple datasets to the same path"
)
for obj in datasets:
if not isinstance(obj, Dataset):
raise TypeError(
"save_mfdataset only supports writing Dataset "
f"objects, received type {type(obj)}"
)
if groups is None:
groups = [None] * len(datasets)
if len({len(datasets), len(paths), len(groups)}) > 1:
raise ValueError(
"must supply lists of the same length for the "
"datasets, paths and groups arguments to "
"save_mfdataset"
)
writers, stores = zip(
*[
to_netcdf(
ds,
path,
mode,
format,
group,
engine,
compute=compute,
multifile=True,
**kwargs,
)
for ds, path, group in zip(datasets, paths, groups, strict=True)
],
strict=True,
)
try:
writes = [w.sync(compute=compute) for w in writers]
finally:
if compute:
for store in stores:
store.close()
if not compute:
import dask
return dask.delayed(
[
dask.delayed(_finalize_store)(w, s)
for w, s in zip(writes, stores, strict=True)
]
)
| save_mfdataset |
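A sketch of the deferred branch at the end of the body above: with ``compute=False`` the function returns a dask.delayed.Delayed wrapping all pending writes. It reuses the `datasets` and `paths` from the docstring example and assumes dask is installed.
>>> delayed = xr.save_mfdataset(datasets, paths, compute=False)
>>> delayed.compute()  # all files are written here, via dask's scheduler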
xarray | 72 | xarray/core/dataset.py | def sel(
self,
indexers: Mapping[Any, Any] | None = None,
method: str | None = None,
tolerance: int | float | Iterable[int | float] | None = None,
drop: bool = False,
**indexers_kwargs: Any,
) -> Self:
"""Returns a new dataset with each array indexed by tick labels
along the specified dimension(s).
In contrast to `Dataset.isel`, indexers for this method should use
labels instead of integers.
Under the hood, this method is powered by using pandas's powerful Index
objects. This makes label based indexing essentially just as fast as
using integer indexing.
It also means this method uses pandas's (well documented) logic for
indexing. This means you can use string shortcuts for datetime indexes
(e.g., '2000-01' to select all values in January 2000). It also means
that slices are treated as inclusive of both the start and stop values,
unlike normal Python indexing.
Parameters
----------
indexers : dict, optional
A dict with keys matching dimensions and values given
by scalars, slices or arrays of tick labels. For dimensions with
multi-index, the indexer may also be a dict-like object with keys
matching index level names.
If DataArrays are passed as indexers, xarray-style indexing will be
carried out. See :ref:`indexing` for the details.
One of indexers or indexers_kwargs must be provided.
method : {None, "nearest", "pad", "ffill", "backfill", "bfill"}, optional
Method to use for inexact matches:
* None (default): only exact matches
* pad / ffill: propagate last valid index value forward
* backfill / bfill: propagate next valid index value backward
* nearest: use nearest valid index value
tolerance : optional
Maximum distance between original and new labels for inexact
matches. The values of the index at the matching locations must
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
drop : bool, optional
If ``drop=True``, drop coordinates variables in `indexers` instead
of making them scalar.
**indexers_kwargs : {dim: indexer, ...}, optional
The keyword arguments form of ``indexers``.
One of indexers or indexers_kwargs must be provided.
Returns
-------
obj : Dataset
A new Dataset with the same contents as this dataset, except each
variable and dimension is indexed by the appropriate indexers.
If indexer DataArrays have coordinates that do not conflict with
this object, then these coordinates will be attached.
In general, each array's data will be a view of the array's data
in this dataset, unless vectorized indexing was triggered by using
an array indexer, in which case the data will be a copy.
See Also
--------
Dataset.isel
DataArray.sel
:doc:`xarray-tutorial:intermediate/indexing/indexing`
Tutorial material on indexing with Xarray objects
:doc:`xarray-tutorial:fundamentals/02.1_indexing_Basic`
Tutorial material on basics of indexing
"""
| /usr/src/app/target_test_cases/failed_tests_sel.txt | def sel(
self,
indexers: Mapping[Any, Any] | None = None,
method: str | None = None,
tolerance: int | float | Iterable[int | float] | None = None,
drop: bool = False,
**indexers_kwargs: Any,
) -> Self:
"""Returns a new dataset with each array indexed by tick labels
along the specified dimension(s).
In contrast to `Dataset.isel`, indexers for this method should use
labels instead of integers.
Under the hood, this method is powered by using pandas's powerful Index
objects. This makes label based indexing essentially just as fast as
using integer indexing.
It also means this method uses pandas's (well documented) logic for
indexing. This means you can use string shortcuts for datetime indexes
(e.g., '2000-01' to select all values in January 2000). It also means
that slices are treated as inclusive of both the start and stop values,
unlike normal Python indexing.
Parameters
----------
indexers : dict, optional
A dict with keys matching dimensions and values given
by scalars, slices or arrays of tick labels. For dimensions with
multi-index, the indexer may also be a dict-like object with keys
matching index level names.
If DataArrays are passed as indexers, xarray-style indexing will be
carried out. See :ref:`indexing` for the details.
One of indexers or indexers_kwargs must be provided.
method : {None, "nearest", "pad", "ffill", "backfill", "bfill"}, optional
Method to use for inexact matches:
* None (default): only exact matches
* pad / ffill: propagate last valid index value forward
* backfill / bfill: propagate next valid index value backward
* nearest: use nearest valid index value
tolerance : optional
Maximum distance between original and new labels for inexact
matches. The values of the index at the matching locations must
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
drop : bool, optional
If ``drop=True``, drop coordinates variables in `indexers` instead
of making them scalar.
**indexers_kwargs : {dim: indexer, ...}, optional
The keyword arguments form of ``indexers``.
One of indexers or indexers_kwargs must be provided.
Returns
-------
obj : Dataset
A new Dataset with the same contents as this dataset, except each
variable and dimension is indexed by the appropriate indexers.
If indexer DataArrays have coordinates that do not conflict with
this object, then these coordinates will be attached.
In general, each array's data will be a view of the array's data
in this dataset, unless vectorized indexing was triggered by using
an array indexer, in which case the data will be a copy.
See Also
--------
Dataset.isel
DataArray.sel
:doc:`xarray-tutorial:intermediate/indexing/indexing`
Tutorial material on indexing with Xarray objects
:doc:`xarray-tutorial:fundamentals/02.1_indexing_Basic`
Tutorial material on basics of indexing
"""
indexers = either_dict_or_kwargs(indexers, indexers_kwargs, "sel")
query_results = map_index_queries(
self, indexers=indexers, method=method, tolerance=tolerance
)
if drop:
no_scalar_variables = {}
for k, v in query_results.variables.items():
if v.dims:
no_scalar_variables[k] = v
else:
if k in self._coord_names:
query_results.drop_coords.append(k)
query_results.variables = no_scalar_variables
result = self.isel(indexers=query_results.dim_indexers, drop=drop)
return result._overwrite_indexes(*query_results.as_tuple()[1:])
| sel |
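The sel docstring above documents the behaviour but carries no inline example; a minimal, illustrative sketch (dataset invented here), assuming pandas and xarray are imported as pd and xr. Output omitted.
>>> import pandas as pd
>>> import xarray as xr
>>> ds = xr.Dataset(
...     {"a": ("time", [1, 2, 3])},
...     coords={"time": pd.date_range("2000-01-01", periods=3)},
... )
>>> ds.sel(time="2000-01-02")  # exact label, using the string shortcut for datetimes
>>> ds.sel(time=pd.Timestamp("2000-01-02 07:00"), method="nearest")  # inexact match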
xarray | 73 | xarray/core/dataset.py | def set_coords(self, names: Hashable | Iterable[Hashable]) -> Self:
"""Given names of one or more variables, set them as coordinates
Parameters
----------
names : hashable or iterable of hashable
Name(s) of variables in this dataset to convert into coordinates.
Examples
--------
>>> dataset = xr.Dataset(
... {
... "pressure": ("time", [1.013, 1.2, 3.5]),
... "time": pd.date_range("2023-01-01", periods=3),
... }
... )
>>> dataset
<xarray.Dataset> Size: 48B
Dimensions: (time: 3)
Coordinates:
* time (time) datetime64[ns] 24B 2023-01-01 2023-01-02 2023-01-03
Data variables:
pressure (time) float64 24B 1.013 1.2 3.5
>>> dataset.set_coords("pressure")
<xarray.Dataset> Size: 48B
Dimensions: (time: 3)
Coordinates:
pressure (time) float64 24B 1.013 1.2 3.5
* time (time) datetime64[ns] 24B 2023-01-01 2023-01-02 2023-01-03
Data variables:
*empty*
On calling ``set_coords``, these data variables are converted to coordinates, as shown in the final dataset.
Returns
-------
Dataset
See Also
--------
Dataset.swap_dims
Dataset.assign_coords
"""
| /usr/src/app/target_test_cases/failed_tests_set_coords.txt | def set_coords(self, names: Hashable | Iterable[Hashable]) -> Self:
"""Given names of one or more variables, set them as coordinates
Parameters
----------
names : hashable or iterable of hashable
Name(s) of variables in this dataset to convert into coordinates.
Examples
--------
>>> dataset = xr.Dataset(
... {
... "pressure": ("time", [1.013, 1.2, 3.5]),
... "time": pd.date_range("2023-01-01", periods=3),
... }
... )
>>> dataset
<xarray.Dataset> Size: 48B
Dimensions: (time: 3)
Coordinates:
* time (time) datetime64[ns] 24B 2023-01-01 2023-01-02 2023-01-03
Data variables:
pressure (time) float64 24B 1.013 1.2 3.5
>>> dataset.set_coords("pressure")
<xarray.Dataset> Size: 48B
Dimensions: (time: 3)
Coordinates:
pressure (time) float64 24B 1.013 1.2 3.5
* time (time) datetime64[ns] 24B 2023-01-01 2023-01-02 2023-01-03
Data variables:
*empty*
On calling ``set_coords``, these data variables are converted to coordinates, as shown in the final dataset.
Returns
-------
Dataset
See Also
--------
Dataset.swap_dims
Dataset.assign_coords
"""
# TODO: allow inserting new coordinates with this method, like
# DataFrame.set_index?
# nb. check in self._variables, not self.data_vars to insure that the
# operation is idempotent
if isinstance(names, str) or not isinstance(names, Iterable):
names = [names]
else:
names = list(names)
self._assert_all_in_dataset(names)
obj = self.copy()
obj._coord_names.update(names)
return obj
| set_coords |
xarray | 74 | xarray/coding/cftimeindex.py | def shift( # type: ignore[override] # freq is typed Any, we are more precise
self,
periods: int | float,
freq: str | timedelta | BaseCFTimeOffset | None = None,
) -> Self:
"""Shift the CFTimeIndex a multiple of the given frequency.
See the documentation for :py:func:`~xarray.cftime_range` for a
complete listing of valid frequency strings.
Parameters
----------
periods : int, or float if freq is of days or below
Periods to shift by
freq : str, datetime.timedelta or BaseCFTimeOffset
A frequency string or datetime.timedelta object to shift by
Returns
-------
CFTimeIndex
See Also
--------
pandas.DatetimeIndex.shift
Examples
--------
>>> index = xr.cftime_range("2000", periods=1, freq="ME")
>>> index
CFTimeIndex([2000-01-31 00:00:00],
dtype='object', length=1, calendar='standard', freq=None)
>>> index.shift(1, "ME")
CFTimeIndex([2000-02-29 00:00:00],
dtype='object', length=1, calendar='standard', freq=None)
>>> index.shift(1.5, "D")
CFTimeIndex([2000-02-01 12:00:00],
dtype='object', length=1, calendar='standard', freq=None)
"""
| /usr/src/app/target_test_cases/failed_tests_shift.txt | def shift( # type: ignore[override] # freq is typed Any, we are more precise
self,
periods: int | float,
freq: str | timedelta | BaseCFTimeOffset | None = None,
) -> Self:
"""Shift the CFTimeIndex a multiple of the given frequency.
See the documentation for :py:func:`~xarray.cftime_range` for a
complete listing of valid frequency strings.
Parameters
----------
periods : int, or float if freq is of days or below
Periods to shift by
freq : str, datetime.timedelta or BaseCFTimeOffset
A frequency string or datetime.timedelta object to shift by
Returns
-------
CFTimeIndex
See Also
--------
pandas.DatetimeIndex.shift
Examples
--------
>>> index = xr.cftime_range("2000", periods=1, freq="ME")
>>> index
CFTimeIndex([2000-01-31 00:00:00],
dtype='object', length=1, calendar='standard', freq=None)
>>> index.shift(1, "ME")
CFTimeIndex([2000-02-29 00:00:00],
dtype='object', length=1, calendar='standard', freq=None)
>>> index.shift(1.5, "D")
CFTimeIndex([2000-02-01 12:00:00],
dtype='object', length=1, calendar='standard', freq=None)
"""
from xarray.coding.cftime_offsets import BaseCFTimeOffset
if freq is None:
# None type is required to be compatible with base pd.Index class
raise TypeError(
f"`freq` argument cannot be None for {type(self).__name__}.shift"
)
if isinstance(freq, timedelta):
return self + periods * freq
if isinstance(freq, str | BaseCFTimeOffset):
from xarray.coding.cftime_offsets import to_offset
return self + periods * to_offset(freq)
raise TypeError(
f"'freq' must be of type str or datetime.timedelta, got {type(freq)}."
)
| shift |
xarray | 75 | xarray/core/_aggregations.py | def sum(
self,
dim: Dims = None,
*,
skipna: bool | None = None,
min_count: int | None = None,
keep_attrs: bool | None = None,
**kwargs: Any,
) -> Self:
"""
Reduce this Dataset's data by applying ``sum`` along some dimension(s).
Parameters
----------
dim : str, Iterable of Hashable, "..." or None, default: None
Name of dimension[s] along which to apply ``sum``. For e.g. ``dim="x"``
or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions.
skipna : bool or None, optional
If True, skip missing values (as marked by NaN). By default, only
skips missing values for float dtypes; other dtypes either do not
have a sentinel missing value (int) or ``skipna=True`` has not been
implemented (object, datetime64 or timedelta64).
min_count : int or None, optional
The required number of valid values to perform the operation. If
fewer than min_count non-NA values are present the result will be
NA. Only used if skipna is set to True or defaults to True for the
array's dtype. Changed in version 0.17.0: if specified on an integer
array and skipna=True, the result will be a float array.
keep_attrs : bool or None, optional
If True, ``attrs`` will be copied from the original
object to the new one. If False, the new object will be
returned without attributes.
**kwargs : Any
Additional keyword arguments passed on to the appropriate array
function for calculating ``sum`` on this object's data.
These could include dask-specific kwargs like ``split_every``.
Returns
-------
reduced : Dataset
New Dataset with ``sum`` applied to its data and the
indicated dimension(s) removed
See Also
--------
numpy.sum
dask.array.sum
DataArray.sum
:ref:`agg`
User guide on reduction or aggregation operations.
Notes
-----
Non-numeric variables will be removed prior to reducing.
Examples
--------
>>> da = xr.DataArray(
... np.array([1, 2, 3, 0, 2, np.nan]),
... dims="time",
... coords=dict(
... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)),
... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
... ),
... )
>>> ds = xr.Dataset(dict(da=da))
>>> ds
<xarray.Dataset> Size: 120B
Dimensions: (time: 6)
Coordinates:
* time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30
labels (time) <U1 24B 'a' 'b' 'c' 'c' 'b' 'a'
Data variables:
da (time) float64 48B 1.0 2.0 3.0 0.0 2.0 nan
>>> ds.sum()
<xarray.Dataset> Size: 8B
Dimensions: ()
Data variables:
da float64 8B 8.0
Use ``skipna`` to control whether NaNs are ignored.
>>> ds.sum(skipna=False)
<xarray.Dataset> Size: 8B
Dimensions: ()
Data variables:
da float64 8B nan
Specify ``min_count`` for finer control over when NaNs are ignored.
>>> ds.sum(skipna=True, min_count=2)
<xarray.Dataset> Size: 8B
Dimensions: ()
Data variables:
da float64 8B 8.0
"""
| /usr/src/app/target_test_cases/failed_tests_sum.txt | def sum(
self,
dim: Dims = None,
*,
skipna: bool | None = None,
min_count: int | None = None,
keep_attrs: bool | None = None,
**kwargs: Any,
) -> Self:
"""
Reduce this Dataset's data by applying ``sum`` along some dimension(s).
Parameters
----------
dim : str, Iterable of Hashable, "..." or None, default: None
Name of dimension[s] along which to apply ``sum``. For e.g. ``dim="x"``
or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions.
skipna : bool or None, optional
If True, skip missing values (as marked by NaN). By default, only
skips missing values for float dtypes; other dtypes either do not
have a sentinel missing value (int) or ``skipna=True`` has not been
implemented (object, datetime64 or timedelta64).
min_count : int or None, optional
The required number of valid values to perform the operation. If
fewer than min_count non-NA values are present the result will be
NA. Only used if skipna is set to True or defaults to True for the
array's dtype. Changed in version 0.17.0: if specified on an integer
array and skipna=True, the result will be a float array.
keep_attrs : bool or None, optional
If True, ``attrs`` will be copied from the original
object to the new one. If False, the new object will be
returned without attributes.
**kwargs : Any
Additional keyword arguments passed on to the appropriate array
function for calculating ``sum`` on this object's data.
These could include dask-specific kwargs like ``split_every``.
Returns
-------
reduced : Dataset
New Dataset with ``sum`` applied to its data and the
indicated dimension(s) removed
See Also
--------
numpy.sum
dask.array.sum
DataArray.sum
:ref:`agg`
User guide on reduction or aggregation operations.
Notes
-----
Non-numeric variables will be removed prior to reducing.
Examples
--------
>>> da = xr.DataArray(
... np.array([1, 2, 3, 0, 2, np.nan]),
... dims="time",
... coords=dict(
... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)),
... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])),
... ),
... )
>>> ds = xr.Dataset(dict(da=da))
>>> ds
<xarray.Dataset> Size: 120B
Dimensions: (time: 6)
Coordinates:
* time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30
labels (time) <U1 24B 'a' 'b' 'c' 'c' 'b' 'a'
Data variables:
da (time) float64 48B 1.0 2.0 3.0 0.0 2.0 nan
>>> ds.sum()
<xarray.Dataset> Size: 8B
Dimensions: ()
Data variables:
da float64 8B 8.0
Use ``skipna`` to control whether NaNs are ignored.
>>> ds.sum(skipna=False)
<xarray.Dataset> Size: 8B
Dimensions: ()
Data variables:
da float64 8B nan
Specify ``min_count`` for finer control over when NaNs are ignored.
>>> ds.sum(skipna=True, min_count=2)
<xarray.Dataset> Size: 8B
Dimensions: ()
Data variables:
da float64 8B 8.0
"""
return self.reduce(
duck_array_ops.sum,
dim=dim,
skipna=skipna,
min_count=min_count,
numeric_only=True,
keep_attrs=keep_attrs,
**kwargs,
)
| sum |
xarray | 76 | xarray/core/dataset.py | def tail(
self,
indexers: Mapping[Any, int] | int | None = None,
**indexers_kwargs: Any,
) -> Self:
"""Returns a new dataset with the last `n` values of each array
for the specified dimension(s).
Parameters
----------
indexers : dict or int, default: 5
A dict with keys matching dimensions and integer values `n`
or a single integer `n` applied over all dimensions.
One of indexers or indexers_kwargs must be provided.
**indexers_kwargs : {dim: n, ...}, optional
The keyword arguments form of ``indexers``.
One of indexers or indexers_kwargs must be provided.
Examples
--------
>>> activity_names = ["Walking", "Running", "Cycling", "Swimming", "Yoga"]
>>> durations = [30, 45, 60, 45, 60] # in minutes
>>> energies = [150, 300, 250, 400, 100] # in calories
>>> dataset = xr.Dataset(
... {
... "duration": (["activity"], durations),
... "energy_expenditure": (["activity"], energies),
... },
... coords={"activity": activity_names},
... )
>>> sorted_dataset = dataset.sortby("energy_expenditure", ascending=False)
>>> sorted_dataset
<xarray.Dataset> Size: 240B
Dimensions: (activity: 5)
Coordinates:
* activity (activity) <U8 160B 'Swimming' 'Running' ... 'Yoga'
Data variables:
duration (activity) int64 40B 45 45 60 30 60
energy_expenditure (activity) int64 40B 400 300 250 150 100
# Activities with the least energy expenditures using tail()
>>> sorted_dataset.tail(3)
<xarray.Dataset> Size: 144B
Dimensions: (activity: 3)
Coordinates:
* activity (activity) <U8 96B 'Cycling' 'Walking' 'Yoga'
Data variables:
duration (activity) int64 24B 60 30 60
energy_expenditure (activity) int64 24B 250 150 100
>>> sorted_dataset.tail({"activity": 3})
<xarray.Dataset> Size: 144B
Dimensions: (activity: 3)
Coordinates:
* activity (activity) <U8 96B 'Cycling' 'Walking' 'Yoga'
Data variables:
duration (activity) int64 24B 60 30 60
energy_expenditure (activity) int64 24B 250 150 100
See Also
--------
Dataset.head
Dataset.thin
DataArray.tail
"""
| /usr/src/app/target_test_cases/failed_tests_tail.txt | def tail(
self,
indexers: Mapping[Any, int] | int | None = None,
**indexers_kwargs: Any,
) -> Self:
"""Returns a new dataset with the last `n` values of each array
for the specified dimension(s).
Parameters
----------
indexers : dict or int, default: 5
A dict with keys matching dimensions and integer values `n`
or a single integer `n` applied over all dimensions.
One of indexers or indexers_kwargs must be provided.
**indexers_kwargs : {dim: n, ...}, optional
The keyword arguments form of ``indexers``.
One of indexers or indexers_kwargs must be provided.
Examples
--------
>>> activity_names = ["Walking", "Running", "Cycling", "Swimming", "Yoga"]
>>> durations = [30, 45, 60, 45, 60] # in minutes
>>> energies = [150, 300, 250, 400, 100] # in calories
>>> dataset = xr.Dataset(
... {
... "duration": (["activity"], durations),
... "energy_expenditure": (["activity"], energies),
... },
... coords={"activity": activity_names},
... )
>>> sorted_dataset = dataset.sortby("energy_expenditure", ascending=False)
>>> sorted_dataset
<xarray.Dataset> Size: 240B
Dimensions: (activity: 5)
Coordinates:
* activity (activity) <U8 160B 'Swimming' 'Running' ... 'Yoga'
Data variables:
duration (activity) int64 40B 45 45 60 30 60
energy_expenditure (activity) int64 40B 400 300 250 150 100
# Activities with the least energy expenditures using tail()
>>> sorted_dataset.tail(3)
<xarray.Dataset> Size: 144B
Dimensions: (activity: 3)
Coordinates:
* activity (activity) <U8 96B 'Cycling' 'Walking' 'Yoga'
Data variables:
duration (activity) int64 24B 60 30 60
energy_expenditure (activity) int64 24B 250 150 100
>>> sorted_dataset.tail({"activity": 3})
<xarray.Dataset> Size: 144B
Dimensions: (activity: 3)
Coordinates:
* activity (activity) <U8 96B 'Cycling' 'Walking' 'Yoga'
Data variables:
duration (activity) int64 24B 60 30 60
energy_expenditure (activity) int64 24B 250 150 100
See Also
--------
Dataset.head
Dataset.thin
DataArray.tail
"""
if not indexers_kwargs:
if indexers is None:
indexers = 5
if not isinstance(indexers, int) and not is_dict_like(indexers):
raise TypeError("indexers must be either dict-like or a single integer")
if isinstance(indexers, int):
indexers = {dim: indexers for dim in self.dims}
indexers = either_dict_or_kwargs(indexers, indexers_kwargs, "tail")
for k, v in indexers.items():
if not isinstance(v, int):
raise TypeError(
"expected integer type indexer for "
f"dimension {k!r}, found {type(v)!r}"
)
elif v < 0:
raise ValueError(
"expected positive integer as indexer "
f"for dimension {k!r}, found {v}"
)
indexers_slices = {
k: slice(-val, None) if val != 0 else slice(val)
for k, val in indexers.items()
}
return self.isel(indexers_slices)
| tail |
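The slice construction at the end of the body above (``slice(-val, None)``) makes ``tail(n)`` equivalent to a negative positional slice; a one-line check, reusing the `sorted_dataset` from the docstring example.
>>> sorted_dataset.tail(3).equals(sorted_dataset.isel(activity=slice(-3, None)))  # True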
xarray | 77 | xarray/core/dataset.py | def thin(
self,
indexers: Mapping[Any, int] | int | None = None,
**indexers_kwargs: Any,
) -> Self:
"""Returns a new dataset with each array indexed along every `n`-th
value for the specified dimension(s)
Parameters
----------
indexers : dict or int
A dict with keys matching dimensions and integer values `n`
or a single integer `n` applied over all dimensions.
One of indexers or indexers_kwargs must be provided.
**indexers_kwargs : {dim: n, ...}, optional
The keyword arguments form of ``indexers``.
One of indexers or indexers_kwargs must be provided.
Examples
--------
>>> x_arr = np.arange(0, 26)
>>> x_arr
array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
17, 18, 19, 20, 21, 22, 23, 24, 25])
>>> x = xr.DataArray(
... np.reshape(x_arr, (2, 13)),
... dims=("x", "y"),
... coords={"x": [0, 1], "y": np.arange(0, 13)},
... )
>>> x_ds = xr.Dataset({"foo": x})
>>> x_ds
<xarray.Dataset> Size: 328B
Dimensions: (x: 2, y: 13)
Coordinates:
* x (x) int64 16B 0 1
* y (y) int64 104B 0 1 2 3 4 5 6 7 8 9 10 11 12
Data variables:
foo (x, y) int64 208B 0 1 2 3 4 5 6 7 8 ... 17 18 19 20 21 22 23 24 25
>>> x_ds.thin(3)
<xarray.Dataset> Size: 88B
Dimensions: (x: 1, y: 5)
Coordinates:
* x (x) int64 8B 0
* y (y) int64 40B 0 3 6 9 12
Data variables:
foo (x, y) int64 40B 0 3 6 9 12
>>> x.thin({"x": 2, "y": 5})
<xarray.DataArray (x: 1, y: 3)> Size: 24B
array([[ 0, 5, 10]])
Coordinates:
* x (x) int64 8B 0
* y (y) int64 24B 0 5 10
See Also
--------
Dataset.head
Dataset.tail
DataArray.thin
"""
| /usr/src/app/target_test_cases/failed_tests_thin.txt | def thin(
self,
indexers: Mapping[Any, int] | int | None = None,
**indexers_kwargs: Any,
) -> Self:
"""Returns a new dataset with each array indexed along every `n`-th
value for the specified dimension(s)
Parameters
----------
indexers : dict or int
A dict with keys matching dimensions and integer values `n`
or a single integer `n` applied over all dimensions.
One of indexers or indexers_kwargs must be provided.
**indexers_kwargs : {dim: n, ...}, optional
The keyword arguments form of ``indexers``.
One of indexers or indexers_kwargs must be provided.
Examples
--------
>>> x_arr = np.arange(0, 26)
>>> x_arr
array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
17, 18, 19, 20, 21, 22, 23, 24, 25])
>>> x = xr.DataArray(
... np.reshape(x_arr, (2, 13)),
... dims=("x", "y"),
... coords={"x": [0, 1], "y": np.arange(0, 13)},
... )
>>> x_ds = xr.Dataset({"foo": x})
>>> x_ds
<xarray.Dataset> Size: 328B
Dimensions: (x: 2, y: 13)
Coordinates:
* x (x) int64 16B 0 1
* y (y) int64 104B 0 1 2 3 4 5 6 7 8 9 10 11 12
Data variables:
foo (x, y) int64 208B 0 1 2 3 4 5 6 7 8 ... 17 18 19 20 21 22 23 24 25
>>> x_ds.thin(3)
<xarray.Dataset> Size: 88B
Dimensions: (x: 1, y: 5)
Coordinates:
* x (x) int64 8B 0
* y (y) int64 40B 0 3 6 9 12
Data variables:
foo (x, y) int64 40B 0 3 6 9 12
>>> x.thin({"x": 2, "y": 5})
<xarray.DataArray (x: 1, y: 3)> Size: 24B
array([[ 0, 5, 10]])
Coordinates:
* x (x) int64 8B 0
* y (y) int64 24B 0 5 10
See Also
--------
Dataset.head
Dataset.tail
DataArray.thin
"""
if (
not indexers_kwargs
and not isinstance(indexers, int)
and not is_dict_like(indexers)
):
raise TypeError("indexers must be either dict-like or a single integer")
if isinstance(indexers, int):
indexers = {dim: indexers for dim in self.dims}
indexers = either_dict_or_kwargs(indexers, indexers_kwargs, "thin")
for k, v in indexers.items():
if not isinstance(v, int):
raise TypeError(
"expected integer type indexer for "
f"dimension {k!r}, found {type(v)!r}"
)
elif v < 0:
raise ValueError(
"expected positive integer as indexer "
f"for dimension {k!r}, found {v}"
)
elif v == 0:
raise ValueError("step cannot be zero")
indexers_slices = {k: slice(None, None, val) for k, val in indexers.items()}
return self.isel(indexers_slices)
| thin |
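Similarly, ``thin(n)`` reduces to a strided positional slice over every dimension (see the ``indexers_slices`` line at the end of the body above); a quick check reusing the `x_ds` from the docstring example.
>>> x_ds.thin(3).equals(x_ds.isel(x=slice(None, None, 3), y=slice(None, None, 3)))  # True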
xarray | 78 | xarray/core/duck_array_ops.py | def timedelta_to_numeric(value, datetime_unit="ns", dtype=float):
"""Convert a timedelta-like object to numerical values.
Parameters
----------
value : datetime.timedelta, numpy.timedelta64, pandas.Timedelta, str
Time delta representation.
datetime_unit : {Y, M, W, D, h, m, s, ms, us, ns, ps, fs, as}
The time units of the output values. Note that some conversions are not allowed due to
non-linear relationships between units.
dtype : type
The output data type.
"""
| /usr/src/app/target_test_cases/failed_tests_timedelta_to_numeric.txt | def timedelta_to_numeric(value, datetime_unit="ns", dtype=float):
"""Convert a timedelta-like object to numerical values.
Parameters
----------
value : datetime.timedelta, numpy.timedelta64, pandas.Timedelta, str
Time delta representation.
datetime_unit : {Y, M, W, D, h, m, s, ms, us, ns, ps, fs, as}
The time units of the output values. Note that some conversions are not allowed due to
non-linear relationships between units.
dtype : type
The output data type.
"""
import datetime as dt
if isinstance(value, dt.timedelta):
out = py_timedelta_to_float(value, datetime_unit)
elif isinstance(value, np.timedelta64):
out = np_timedelta64_to_float(value, datetime_unit)
elif isinstance(value, pd.Timedelta):
out = pd_timedelta_to_float(value, datetime_unit)
elif isinstance(value, str):
try:
a = pd.to_timedelta(value)
except ValueError as err:
raise ValueError(
f"Could not convert {value!r} to timedelta64 using pandas.to_timedelta"
) from err
return py_timedelta_to_float(a, datetime_unit)
else:
raise TypeError(
f"Expected value of type str, pandas.Timedelta, datetime.timedelta "
f"or numpy.timedelta64, but received {type(value).__name__}"
)
return out.astype(dtype)
| timedelta_to_numeric |
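timedelta_to_numeric carries no doctest; the illustrative calls below import it from the internal module path given in the row (xarray/core/duck_array_ops.py), and the commented values follow from plain unit arithmetic.
>>> import datetime
>>> import numpy as np
>>> from xarray.core.duck_array_ops import timedelta_to_numeric
>>> timedelta_to_numeric(datetime.timedelta(hours=1), datetime_unit="m")  # 60.0
>>> timedelta_to_numeric("1h", datetime_unit="s")  # 3600.0
>>> timedelta_to_numeric(np.timedelta64(2, "D"), datetime_unit="h")  # 48.0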
xarray | 79 | xarray/coding/cftimeindex.py | def to_datetimeindex(self, unsafe=False):
"""If possible, convert this index to a pandas.DatetimeIndex.
Parameters
----------
unsafe : bool
Flag to turn off warning when converting from a CFTimeIndex with
a non-standard calendar to a DatetimeIndex (default ``False``).
Returns
-------
pandas.DatetimeIndex
Raises
------
ValueError
If the CFTimeIndex contains dates that are not possible in the
standard calendar or outside the nanosecond-precision range.
Warns
-----
RuntimeWarning
If converting from a non-standard calendar to a DatetimeIndex.
Warnings
--------
Note that for non-standard calendars, this will change the calendar
type of the index. In that case the result of this method should be
used with caution.
Examples
--------
>>> times = xr.cftime_range("2000", periods=2, calendar="gregorian")
>>> times
CFTimeIndex([2000-01-01 00:00:00, 2000-01-02 00:00:00],
dtype='object', length=2, calendar='standard', freq=None)
>>> times.to_datetimeindex()
DatetimeIndex(['2000-01-01', '2000-01-02'], dtype='datetime64[ns]', freq=None)
"""
| /usr/src/app/target_test_cases/failed_tests_to_datetimeindex.txt | def to_datetimeindex(self, unsafe=False):
"""If possible, convert this index to a pandas.DatetimeIndex.
Parameters
----------
unsafe : bool
Flag to turn off warning when converting from a CFTimeIndex with
a non-standard calendar to a DatetimeIndex (default ``False``).
Returns
-------
pandas.DatetimeIndex
Raises
------
ValueError
If the CFTimeIndex contains dates that are not possible in the
standard calendar or outside the nanosecond-precision range.
Warns
-----
RuntimeWarning
If converting from a non-standard calendar to a DatetimeIndex.
Warnings
--------
Note that for non-standard calendars, this will change the calendar
type of the index. In that case the result of this method should be
used with caution.
Examples
--------
>>> times = xr.cftime_range("2000", periods=2, calendar="gregorian")
>>> times
CFTimeIndex([2000-01-01 00:00:00, 2000-01-02 00:00:00],
dtype='object', length=2, calendar='standard', freq=None)
>>> times.to_datetimeindex()
DatetimeIndex(['2000-01-01', '2000-01-02'], dtype='datetime64[ns]', freq=None)
"""
if not self._data.size:
return pd.DatetimeIndex([])
nptimes = cftime_to_nptime(self)
calendar = infer_calendar_name(self)
if calendar not in _STANDARD_CALENDARS and not unsafe:
warnings.warn(
"Converting a CFTimeIndex with dates from a non-standard "
f"calendar, {calendar!r}, to a pandas.DatetimeIndex, which uses dates "
"from the standard calendar. This may lead to subtle errors "
"in operations that depend on the length of time between "
"dates.",
RuntimeWarning,
stacklevel=2,
)
return pd.DatetimeIndex(nptimes)
| to_datetimeindex |
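The docstring examples above only cover the standard calendar; a short sketch of the non-standard-calendar path, where the conversion emits a RuntimeWarning unless unsafe=True is passed (the calendar name and dates are illustrative):

import xarray as xr

times = xr.cftime_range("2000-01-01", periods=3, calendar="noleap")
# unsafe=True silences the RuntimeWarning about changing the calendar type
idx = times.to_datetimeindex(unsafe=True)
# idx is a pandas.DatetimeIndex holding the same nominal dates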
xarray | 80 | xarray/plot/utils.py | def values(self) -> DataArray | None:
"""
Return a normalized number array for the unique levels.
Examples
--------
>>> a = xr.DataArray(["b", "a", "a", "b", "c"])
>>> _Normalize(a).values
<xarray.DataArray (dim_0: 5)> Size: 40B
array([3, 1, 1, 3, 5])
Dimensions without coordinates: dim_0
>>> _Normalize(a, width=(18, 36, 72)).values
<xarray.DataArray (dim_0: 5)> Size: 40B
array([45., 18., 18., 45., 72.])
Dimensions without coordinates: dim_0
>>> a = xr.DataArray([0.5, 0, 0, 0.5, 2, 3])
>>> _Normalize(a).values
<xarray.DataArray (dim_0: 6)> Size: 48B
array([0.5, 0. , 0. , 0.5, 2. , 3. ])
Dimensions without coordinates: dim_0
>>> _Normalize(a, width=(18, 36, 72)).values
<xarray.DataArray (dim_0: 6)> Size: 48B
array([27., 18., 18., 27., 54., 72.])
Dimensions without coordinates: dim_0
>>> _Normalize(a * 0, width=(18, 36, 72)).values
<xarray.DataArray (dim_0: 6)> Size: 48B
array([36., 36., 36., 36., 36., 36.])
Dimensions without coordinates: dim_0
"""
| /usr/src/app/target_test_cases/failed_tests_values.txt | def values(self) -> DataArray | None:
"""
Return a normalized number array for the unique levels.
Examples
--------
>>> a = xr.DataArray(["b", "a", "a", "b", "c"])
>>> _Normalize(a).values
<xarray.DataArray (dim_0: 5)> Size: 40B
array([3, 1, 1, 3, 5])
Dimensions without coordinates: dim_0
>>> _Normalize(a, width=(18, 36, 72)).values
<xarray.DataArray (dim_0: 5)> Size: 40B
array([45., 18., 18., 45., 72.])
Dimensions without coordinates: dim_0
>>> a = xr.DataArray([0.5, 0, 0, 0.5, 2, 3])
>>> _Normalize(a).values
<xarray.DataArray (dim_0: 6)> Size: 48B
array([0.5, 0. , 0. , 0.5, 2. , 3. ])
Dimensions without coordinates: dim_0
>>> _Normalize(a, width=(18, 36, 72)).values
<xarray.DataArray (dim_0: 6)> Size: 48B
array([27., 18., 18., 27., 54., 72.])
Dimensions without coordinates: dim_0
>>> _Normalize(a * 0, width=(18, 36, 72)).values
<xarray.DataArray (dim_0: 6)> Size: 48B
array([36., 36., 36., 36., 36., 36.])
Dimensions without coordinates: dim_0
"""
if self.data is None:
return None
val: DataArray
if self.data_is_numeric:
val = self.data
else:
arr = self._indexes_centered(self._data_unique_inverse)
val = self.data.copy(data=arr.reshape(self.data.shape))
return self._calc_widths(val)
| values |
xarray | 81 | xarray/testing/strategies.py | def variables(
draw: st.DrawFn,
*,
array_strategy_fn: ArrayStrategyFn | None = None,
dims: st.SearchStrategy[Sequence[Hashable] | Mapping[Hashable, int]] | None = None,
dtype: st.SearchStrategy[np.dtype] | None = None,
attrs: st.SearchStrategy[Mapping] = ATTRS,
) -> xr.Variable:
"""
Generates arbitrary xarray.Variable objects.
Follows the basic signature of the xarray.Variable constructor, but allows passing alternative strategies to
generate either numpy-like array data or dimensions. Also allows specifying the shape or dtype of the wrapped array
up front.
Passing nothing will generate a completely arbitrary Variable (containing a numpy array).
Requires the hypothesis package to be installed.
Parameters
----------
array_strategy_fn: Callable which returns a strategy generating array-likes, optional
Callable must only accept shape and dtype kwargs, and must generate results consistent with its input.
If not passed the default is to generate a small numpy array with one of the supported_dtypes.
dims: Strategy for generating the dimensions, optional
Can either be a strategy for generating a sequence of string dimension names,
or a strategy for generating a mapping of string dimension names to integer lengths along each dimension.
If provided as a mapping the array shape will be passed to array_strategy_fn.
Default is to generate arbitrary dimension names for each axis in data.
dtype: Strategy which generates np.dtype objects, optional
Will be passed in to array_strategy_fn.
Default is to generate any scalar dtype using supported_dtypes.
Be aware that this default set of dtypes includes some not strictly allowed by the array API standard.
attrs: Strategy which generates dicts, optional
Default is to generate a nested attributes dictionary containing arbitrary strings, booleans, integers, Nones,
and numpy arrays.
Returns
-------
variable_strategy
Strategy for generating xarray.Variable objects.
Raises
------
ValueError
If a custom array_strategy_fn returns a strategy which generates an example array inconsistent with the shape
& dtype input passed to it.
Examples
--------
Generate completely arbitrary Variable objects backed by a numpy array:
>>> variables().example() # doctest: +SKIP
<xarray.Variable (żō: 3)>
array([43506, -16, -151], dtype=int32)
>>> variables().example() # doctest: +SKIP
<xarray.Variable (eD: 4, ğŻżÂĕ: 2, T: 2)>
array([[[-10000000., -10000000.],
[-10000000., -10000000.]],
[[-10000000., -10000000.],
[ 0., -10000000.]],
[[ 0., -10000000.],
[-10000000., inf]],
[[ -0., -10000000.],
[-10000000., -0.]]], dtype=float32)
Attributes:
śřĴ: {'ĉ': {'iĥf': array([-30117, -1740], dtype=int16)}}
Generate only Variable objects with certain dimension names:
>>> variables(dims=st.just(["a", "b"])).example() # doctest: +SKIP
<xarray.Variable (a: 5, b: 3)>
array([[ 248, 4294967295, 4294967295],
[2412855555, 3514117556, 4294967295],
[ 111, 4294967295, 4294967295],
[4294967295, 1084434988, 51688],
[ 47714, 252, 11207]], dtype=uint32)
Generate only Variable objects with certain dimension names and lengths:
>>> variables(dims=st.just({"a": 2, "b": 1})).example() # doctest: +SKIP
<xarray.Variable (a: 2, b: 1)>
array([[-1.00000000e+007+3.40282347e+038j],
[-2.75034266e-225+2.22507386e-311j]])
See Also
--------
:ref:`testing.hypothesis`_
"""
| /usr/src/app/target_test_cases/failed_tests_variables.txt | def variables(
draw: st.DrawFn,
*,
array_strategy_fn: ArrayStrategyFn | None = None,
dims: st.SearchStrategy[Sequence[Hashable] | Mapping[Hashable, int]] | None = None,
dtype: st.SearchStrategy[np.dtype] | None = None,
attrs: st.SearchStrategy[Mapping] = ATTRS,
) -> xr.Variable:
"""
Generates arbitrary xarray.Variable objects.
Follows the basic signature of the xarray.Variable constructor, but allows passing alternative strategies to
generate either numpy-like array data or dimensions. Also allows specifying the shape or dtype of the wrapped array
up front.
Passing nothing will generate a completely arbitrary Variable (containing a numpy array).
Requires the hypothesis package to be installed.
Parameters
----------
array_strategy_fn: Callable which returns a strategy generating array-likes, optional
Callable must only accept shape and dtype kwargs, and must generate results consistent with its input.
If not passed the default is to generate a small numpy array with one of the supported_dtypes.
dims: Strategy for generating the dimensions, optional
Can either be a strategy for generating a sequence of string dimension names,
or a strategy for generating a mapping of string dimension names to integer lengths along each dimension.
If provided as a mapping the array shape will be passed to array_strategy_fn.
Default is to generate arbitrary dimension names for each axis in data.
dtype: Strategy which generates np.dtype objects, optional
Will be passed in to array_strategy_fn.
Default is to generate any scalar dtype using supported_dtypes.
Be aware that this default set of dtypes includes some not strictly allowed by the array API standard.
attrs: Strategy which generates dicts, optional
Default is to generate a nested attributes dictionary containing arbitrary strings, booleans, integers, Nones,
and numpy arrays.
Returns
-------
variable_strategy
Strategy for generating xarray.Variable objects.
Raises
------
ValueError
If a custom array_strategy_fn returns a strategy which generates an example array inconsistent with the shape
& dtype input passed to it.
Examples
--------
Generate completely arbitrary Variable objects backed by a numpy array:
>>> variables().example() # doctest: +SKIP
<xarray.Variable (żō: 3)>
array([43506, -16, -151], dtype=int32)
>>> variables().example() # doctest: +SKIP
<xarray.Variable (eD: 4, ğŻżÂĕ: 2, T: 2)>
array([[[-10000000., -10000000.],
[-10000000., -10000000.]],
[[-10000000., -10000000.],
[ 0., -10000000.]],
[[ 0., -10000000.],
[-10000000., inf]],
[[ -0., -10000000.],
[-10000000., -0.]]], dtype=float32)
Attributes:
śřĴ: {'ĉ': {'iĥf': array([-30117, -1740], dtype=int16)}}
Generate only Variable objects with certain dimension names:
>>> variables(dims=st.just(["a", "b"])).example() # doctest: +SKIP
<xarray.Variable (a: 5, b: 3)>
array([[ 248, 4294967295, 4294967295],
[2412855555, 3514117556, 4294967295],
[ 111, 4294967295, 4294967295],
[4294967295, 1084434988, 51688],
[ 47714, 252, 11207]], dtype=uint32)
Generate only Variable objects with certain dimension names and lengths:
>>> variables(dims=st.just({"a": 2, "b": 1})).example() # doctest: +SKIP
<xarray.Variable (a: 2, b: 1)>
array([[-1.00000000e+007+3.40282347e+038j],
[-2.75034266e-225+2.22507386e-311j]])
See Also
--------
:ref:`testing.hypothesis`_
"""
if dtype is None:
dtype = supported_dtypes()
if not isinstance(dims, st.SearchStrategy) and dims is not None:
raise InvalidArgument(
f"dims must be provided as a hypothesis.strategies.SearchStrategy object (or None), but got type {type(dims)}. "
"To specify fixed contents, use hypothesis.strategies.just()."
)
if not isinstance(dtype, st.SearchStrategy) and dtype is not None:
raise InvalidArgument(
f"dtype must be provided as a hypothesis.strategies.SearchStrategy object (or None), but got type {type(dtype)}. "
"To specify fixed contents, use hypothesis.strategies.just()."
)
if not isinstance(attrs, st.SearchStrategy) and attrs is not None:
raise InvalidArgument(
f"attrs must be provided as a hypothesis.strategies.SearchStrategy object (or None), but got type {type(attrs)}. "
"To specify fixed contents, use hypothesis.strategies.just()."
)
_array_strategy_fn: ArrayStrategyFn
if array_strategy_fn is None:
# For some reason if I move the default value to the function signature definition mypy incorrectly says the ignore is no longer necessary, making it impossible to satisfy mypy
_array_strategy_fn = npst.arrays # type: ignore[assignment] # npst.arrays has extra kwargs that we aren't using later
elif not callable(array_strategy_fn):
raise InvalidArgument(
"array_strategy_fn must be a Callable that accepts the kwargs dtype and shape and returns a hypothesis "
"strategy which generates corresponding array-like objects."
)
else:
_array_strategy_fn = (
array_strategy_fn # satisfy mypy that this new variable cannot be None
)
_dtype = draw(dtype)
if dims is not None:
# generate dims first then draw data to match
_dims = draw(dims)
if isinstance(_dims, Sequence):
dim_names = list(_dims)
valid_shapes = npst.array_shapes(min_dims=len(_dims), max_dims=len(_dims))
_shape = draw(valid_shapes)
array_strategy = _array_strategy_fn(shape=_shape, dtype=_dtype)
elif isinstance(_dims, Mapping | dict):
# should be a mapping of form {dim_names: lengths}
dim_names, _shape = list(_dims.keys()), tuple(_dims.values())
array_strategy = _array_strategy_fn(shape=_shape, dtype=_dtype)
else:
raise InvalidArgument(
f"Invalid type returned by dims strategy - drew an object of type {type(dims)}"
)
else:
# nothing provided, so generate everything consistently
# We still generate the shape first here just so that we always pass shape to array_strategy_fn
_shape = draw(npst.array_shapes())
array_strategy = _array_strategy_fn(shape=_shape, dtype=_dtype)
dim_names = draw(dimension_names(min_dims=len(_shape), max_dims=len(_shape)))
_data = draw(array_strategy)
if _data.shape != _shape:
raise ValueError(
"array_strategy_fn returned an array object with a different shape than it was passed."
f"Passed {_shape}, but returned {_data.shape}."
"Please either specify a consistent shape via the dims kwarg or ensure the array_strategy_fn callable "
"obeys the shape argument passed to it."
)
if _data.dtype != _dtype:
raise ValueError(
"array_strategy_fn returned an array object with a different dtype than it was passed."
f"Passed {_dtype}, but returned {_data.dtype}"
"Please either specify a consistent dtype via the dtype kwarg or ensure the array_strategy_fn callable "
"obeys the dtype argument passed to it."
)
return xr.Variable(dims=dim_names, data=_data, attrs=draw(attrs))
| variables |
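A sketch of how the strategy above is typically consumed inside a Hypothesis test; the test name and the property being checked are illustrative, and the hypothesis package must be installed.

import hypothesis.strategies as st
from hypothesis import given
import xarray.testing.strategies as xrst

@given(var=xrst.variables(dims=st.just({"x": 3, "y": 2})))
def test_transpose_roundtrip(var):
    # transposing there and back should leave the Variable unchanged
    assert var.transpose("y", "x").transpose("x", "y").equals(var)

Passing dims as a mapping fixes both the dimension names and the shape, so the generated data always matches what the test body expects.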
xarray | 82 | xarray/core/computation.py | def where(cond, x, y, keep_attrs=None):
"""Return elements from `x` or `y` depending on `cond`.
Performs xarray-like broadcasting across input arguments.
All dimension coordinates on `x` and `y` must be aligned with each
other and with `cond`.
Parameters
----------
cond : scalar, array, Variable, DataArray or Dataset
When True, return values from `x`, otherwise returns values from `y`.
x : scalar, array, Variable, DataArray or Dataset
values to choose from where `cond` is True
y : scalar, array, Variable, DataArray or Dataset
values to choose from where `cond` is False
keep_attrs : bool or str or callable, optional
How to treat attrs. If True, keep the attrs of `x`.
Returns
-------
Dataset, DataArray, Variable or array
In priority order: Dataset, DataArray, Variable or array, whichever
type appears as an input argument.
Examples
--------
>>> x = xr.DataArray(
... 0.1 * np.arange(10),
... dims=["lat"],
... coords={"lat": np.arange(10)},
... name="sst",
... )
>>> x
<xarray.DataArray 'sst' (lat: 10)> Size: 80B
array([0. , 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])
Coordinates:
* lat (lat) int64 80B 0 1 2 3 4 5 6 7 8 9
>>> xr.where(x < 0.5, x, x * 100)
<xarray.DataArray 'sst' (lat: 10)> Size: 80B
array([ 0. , 0.1, 0.2, 0.3, 0.4, 50. , 60. , 70. , 80. , 90. ])
Coordinates:
* lat (lat) int64 80B 0 1 2 3 4 5 6 7 8 9
>>> y = xr.DataArray(
... 0.1 * np.arange(9).reshape(3, 3),
... dims=["lat", "lon"],
... coords={"lat": np.arange(3), "lon": 10 + np.arange(3)},
... name="sst",
... )
>>> y
<xarray.DataArray 'sst' (lat: 3, lon: 3)> Size: 72B
array([[0. , 0.1, 0.2],
[0.3, 0.4, 0.5],
[0.6, 0.7, 0.8]])
Coordinates:
* lat (lat) int64 24B 0 1 2
* lon (lon) int64 24B 10 11 12
>>> xr.where(y.lat < 1, y, -1)
<xarray.DataArray (lat: 3, lon: 3)> Size: 72B
array([[ 0. , 0.1, 0.2],
[-1. , -1. , -1. ],
[-1. , -1. , -1. ]])
Coordinates:
* lat (lat) int64 24B 0 1 2
* lon (lon) int64 24B 10 11 12
>>> cond = xr.DataArray([True, False], dims=["x"])
>>> x = xr.DataArray([1, 2], dims=["y"])
>>> xr.where(cond, x, 0)
<xarray.DataArray (x: 2, y: 2)> Size: 32B
array([[1, 2],
[0, 0]])
Dimensions without coordinates: x, y
See Also
--------
numpy.where : corresponding numpy function
Dataset.where, DataArray.where :
equivalent methods
"""
| /usr/src/app/target_test_cases/failed_tests_where.txt | def where(cond, x, y, keep_attrs=None):
"""Return elements from `x` or `y` depending on `cond`.
Performs xarray-like broadcasting across input arguments.
All dimension coordinates on `x` and `y` must be aligned with each
other and with `cond`.
Parameters
----------
cond : scalar, array, Variable, DataArray or Dataset
When True, return values from `x`, otherwise returns values from `y`.
x : scalar, array, Variable, DataArray or Dataset
values to choose from where `cond` is True
y : scalar, array, Variable, DataArray or Dataset
values to choose from where `cond` is False
keep_attrs : bool or str or callable, optional
How to treat attrs. If True, keep the attrs of `x`.
Returns
-------
Dataset, DataArray, Variable or array
In priority order: Dataset, DataArray, Variable or array, whichever
type appears as an input argument.
Examples
--------
>>> x = xr.DataArray(
... 0.1 * np.arange(10),
... dims=["lat"],
... coords={"lat": np.arange(10)},
... name="sst",
... )
>>> x
<xarray.DataArray 'sst' (lat: 10)> Size: 80B
array([0. , 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])
Coordinates:
* lat (lat) int64 80B 0 1 2 3 4 5 6 7 8 9
>>> xr.where(x < 0.5, x, x * 100)
<xarray.DataArray 'sst' (lat: 10)> Size: 80B
array([ 0. , 0.1, 0.2, 0.3, 0.4, 50. , 60. , 70. , 80. , 90. ])
Coordinates:
* lat (lat) int64 80B 0 1 2 3 4 5 6 7 8 9
>>> y = xr.DataArray(
... 0.1 * np.arange(9).reshape(3, 3),
... dims=["lat", "lon"],
... coords={"lat": np.arange(3), "lon": 10 + np.arange(3)},
... name="sst",
... )
>>> y
<xarray.DataArray 'sst' (lat: 3, lon: 3)> Size: 72B
array([[0. , 0.1, 0.2],
[0.3, 0.4, 0.5],
[0.6, 0.7, 0.8]])
Coordinates:
* lat (lat) int64 24B 0 1 2
* lon (lon) int64 24B 10 11 12
>>> xr.where(y.lat < 1, y, -1)
<xarray.DataArray (lat: 3, lon: 3)> Size: 72B
array([[ 0. , 0.1, 0.2],
[-1. , -1. , -1. ],
[-1. , -1. , -1. ]])
Coordinates:
* lat (lat) int64 24B 0 1 2
* lon (lon) int64 24B 10 11 12
>>> cond = xr.DataArray([True, False], dims=["x"])
>>> x = xr.DataArray([1, 2], dims=["y"])
>>> xr.where(cond, x, 0)
<xarray.DataArray (x: 2, y: 2)> Size: 32B
array([[1, 2],
[0, 0]])
Dimensions without coordinates: x, y
See Also
--------
numpy.where : corresponding numpy function
Dataset.where, DataArray.where :
equivalent methods
"""
from xarray.core.dataset import Dataset
if keep_attrs is None:
keep_attrs = _get_keep_attrs(default=False)
# alignment for three arguments is complicated, so don't support it yet
result = apply_ufunc(
duck_array_ops.where,
cond,
x,
y,
join="exact",
dataset_join="exact",
dask="allowed",
keep_attrs=keep_attrs,
)
# keep the attributes of x, the second parameter, by default to
# be consistent with the `where` method of `DataArray` and `Dataset`
# rebuild the attrs from x at each level of the output, which could be
# Dataset, DataArray, or Variable, and also handle coords
if keep_attrs is True and hasattr(result, "attrs"):
if isinstance(y, Dataset) and not isinstance(x, Dataset):
# handle special case where x gets promoted to Dataset
result.attrs = {}
if getattr(x, "name", None) in result.data_vars:
result[x.name].attrs = getattr(x, "attrs", {})
else:
# otherwise, fill in global attrs and variable attrs (if they exist)
result.attrs = getattr(x, "attrs", {})
for v in getattr(result, "data_vars", []):
result[v].attrs = getattr(getattr(x, v, None), "attrs", {})
for c in getattr(result, "coords", []):
# always fill coord attrs of x
result[c].attrs = getattr(getattr(x, c, None), "attrs", {})
return result
| where |
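The docstring examples above do not exercise keep_attrs; a small sketch of that behaviour (variable names and attrs are illustrative):

import numpy as np
import xarray as xr

x = xr.DataArray(np.arange(3.0), dims="t", attrs={"units": "K"})
# With keep_attrs=True the result keeps the attrs of x, the second argument
out = xr.where(x > 1, x, np.nan, keep_attrs=True)
# out.attrs == {"units": "K"}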
datasets | 0 | src/datasets/features/audio.py | def decode_example(
self, value: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None
) -> dict:
"""Decode example audio file into audio data.
Args:
value (`dict`):
A dictionary with keys:
- `path`: String with relative audio file path.
- `bytes`: Bytes of the audio file.
token_per_repo_id (`dict`, *optional*):
To access and decode
audio files from private repositories on the Hub, you can pass
a dictionary repo_id (`str`) -> token (`bool` or `str`)
Returns:
`dict`
"""
| /usr/src/app/target_test_cases/failed_tests_Audio.decode_example.txt | def decode_example(
self, value: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None
) -> dict:
"""Decode example audio file into audio data.
Args:
value (`dict`):
A dictionary with keys:
- `path`: String with relative audio file path.
- `bytes`: Bytes of the audio file.
token_per_repo_id (`dict`, *optional*):
To access and decode
audio files from private repositories on the Hub, you can pass
a dictionary repo_id (`str`) -> token (`bool` or `str`)
Returns:
`dict`
"""
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")
path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
if path is None and file is None:
raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err
audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
)
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
)
if file is None:
token_per_repo_id = token_per_repo_id or {}
source_url = path.split("::")[-1]
pattern = (
config.HUB_DATASETS_URL if source_url.startswith(config.HF_ENDPOINT) else config.HUB_DATASETS_HFFS_URL
)
try:
repo_id = string_to_dict(source_url, pattern)["repo_id"]
token = token_per_repo_id[repo_id]
except (ValueError, KeyError):
token = None
download_config = DownloadConfig(token=token)
with xopen(path, "rb", download_config=download_config) as f:
array, sampling_rate = sf.read(f)
else:
array, sampling_rate = sf.read(file)
array = array.T
if self.mono:
array = librosa.to_mono(array)
if self.sampling_rate and self.sampling_rate != sampling_rate:
array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
sampling_rate = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
| Audio.decode_example |
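A minimal sketch of calling the method above directly; the file name is hypothetical, librosa and soundfile must be installed, and in normal use decoding happens implicitly when a dataset column cast to Audio is accessed rather than through an explicit call.

from datasets import Audio

feature = Audio(sampling_rate=16_000, mono=True)
decoded = feature.decode_example({"path": "speech.wav", "bytes": None})
# decoded["array"] is a 1-D float array resampled to decoded["sampling_rate"] == 16000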
datasets | 1 | src/datasets/features/features.py | def cast_storage(self, storage: Union[pa.StringArray, pa.IntegerArray]) -> pa.Int64Array:
"""Cast an Arrow array to the `ClassLabel` arrow storage type.
The Arrow types that can be converted to the `ClassLabel` pyarrow storage type are:
- `pa.string()`
- `pa.int()`
Args:
storage (`Union[pa.StringArray, pa.IntegerArray]`):
PyArrow array to cast.
Returns:
`pa.Int64Array`: Array in the `ClassLabel` arrow storage type.
"""
| /usr/src/app/target_test_cases/failed_tests_ClassLabel.cast_storage.txt | def cast_storage(self, storage: Union[pa.StringArray, pa.IntegerArray]) -> pa.Int64Array:
"""Cast an Arrow array to the `ClassLabel` arrow storage type.
The Arrow types that can be converted to the `ClassLabel` pyarrow storage type are:
- `pa.string()`
- `pa.int()`
Args:
storage (`Union[pa.StringArray, pa.IntegerArray]`):
PyArrow array to cast.
Returns:
`pa.Int64Array`: Array in the `ClassLabel` arrow storage type.
"""
if isinstance(storage, pa.IntegerArray) and len(storage) > 0:
min_max = pc.min_max(storage).as_py()
if min_max["max"] is not None and min_max["max"] >= self.num_classes:
raise ValueError(
f"Class label {min_max['max']} greater than configured num_classes {self.num_classes}"
)
elif isinstance(storage, pa.StringArray):
storage = pa.array(
[self._strval2int(label) if label is not None else None for label in storage.to_pylist()]
)
return array_cast(storage, self.pa_type)
| ClassLabel.cast_storage |
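A short sketch of the two storage paths handled above, using an illustrative two-class label set:

import pyarrow as pa
from datasets import ClassLabel

labels = ClassLabel(names=["neg", "pos"])
labels.cast_storage(pa.array(["pos", "neg", None]))  # string labels mapped to ids -> [1, 0, null]
labels.cast_storage(pa.array([0, 1]))                # integers validated against num_classes -> [0, 1]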