Column schema (all columns are strings; ranges are value lengths in characters):

    repository            11 distinct values
    repo_id               length 1 to 3
    target_module_path    length 16 to 72
    prompt                length 298 to 21.7k
    relavent_test_path    length 50 to 99
    full_function         length 336 to 33.8k
    function_name         length 2 to 51

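The rows that follow use this schema. As a minimal sketch of how a dataset with these columns could be loaded and inspected with the `datasets` library (the Hub repository ID and split handling below are assumptions, not details given on this page):

```python
from datasets import load_dataset

# Hypothetical Hub repository ID -- not given on this page; substitute the real one.
dsd = load_dataset("<user>/<dataset_name>")

# Take the first available split rather than assuming its name.
split_name = next(iter(dsd))
row = dsd[split_name][0]

# Each row pairs a function signature + docstring ("prompt") with the full
# implementation ("full_function"), plus the source repository, module path,
# failing-test path ("relavent_test_path") and dotted function name.
print(row["repository"], row["function_name"])    # e.g. datasets ClassLabel.int2str
print(row["target_module_path"])                  # e.g. src/datasets/features/features.py
print(row["prompt"][:200])                        # start of the prompt cell
```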

repository: datasets
repo_id: 2
target_module_path: src/datasets/features/features.py
prompt:
def int2str(self, values: Union[int, Iterable]) -> Union[str, Iterable]: """Conversion `integer` => class name `string`. Regarding unknown/missing labels: passing negative integers raises `ValueError`. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train") >>> ds.features["label"].int2str(0) 'neg' ``` """
relavent_test_path: /usr/src/app/target_test_cases/failed_tests_ClassLabel.int2str.txt
full_function:
def int2str(self, values: Union[int, Iterable]) -> Union[str, Iterable]: """Conversion `integer` => class name `string`. Regarding unknown/missing labels: passing negative integers raises `ValueError`. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train") >>> ds.features["label"].int2str(0) 'neg' ``` """ if not isinstance(values, int) and not isinstance(values, Iterable): raise ValueError( f"Values {values} should be an integer or an Iterable (list, numpy array, pytorch, tensorflow tensors)" ) return_list = True if isinstance(values, int): values = [values] return_list = False for v in values: if not 0 <= v < self.num_classes: raise ValueError(f"Invalid integer class label {v:d}") output = [self._int2str[int(v)] for v in values] return output if return_list else output[0]
function_name: ClassLabel.int2str

repository: datasets
repo_id: 3
target_module_path: src/datasets/dataset_dict.py
prompt:
def flatten(self, max_depth=16) -> "DatasetDict": """Flatten the Apache Arrow Table of each split (nested features are flatten). Each column with a struct type is flattened into one column per struct field. Other columns are left unchanged. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("squad") >>> ds["train"].features {'answers': Sequence(feature={'text': Value(dtype='string', id=None), 'answer_start': Value(dtype='int32', id=None)}, length=-1, id=None), 'context': Value(dtype='string', id=None), 'id': Value(dtype='string', id=None), 'question': Value(dtype='string', id=None), 'title': Value(dtype='string', id=None)} >>> ds.flatten() DatasetDict({ train: Dataset({ features: ['id', 'title', 'context', 'question', 'answers.text', 'answers.answer_start'], num_rows: 87599 }) validation: Dataset({ features: ['id', 'title', 'context', 'question', 'answers.text', 'answers.answer_start'], num_rows: 10570 }) }) ``` """
relavent_test_path: /usr/src/app/target_test_cases/failed_tests_DatasetDict.flatten.txt
full_function:
def flatten(self, max_depth=16) -> "DatasetDict": """Flatten the Apache Arrow Table of each split (nested features are flatten). Each column with a struct type is flattened into one column per struct field. Other columns are left unchanged. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("squad") >>> ds["train"].features {'answers': Sequence(feature={'text': Value(dtype='string', id=None), 'answer_start': Value(dtype='int32', id=None)}, length=-1, id=None), 'context': Value(dtype='string', id=None), 'id': Value(dtype='string', id=None), 'question': Value(dtype='string', id=None), 'title': Value(dtype='string', id=None)} >>> ds.flatten() DatasetDict({ train: Dataset({ features: ['id', 'title', 'context', 'question', 'answers.text', 'answers.answer_start'], num_rows: 87599 }) validation: Dataset({ features: ['id', 'title', 'context', 'question', 'answers.text', 'answers.answer_start'], num_rows: 10570 }) }) ``` """ self._check_values_type() return DatasetDict({k: dataset.flatten(max_depth=max_depth) for k, dataset in self.items()})
function_name: DatasetDict.flatten

repository: datasets
repo_id: 4
target_module_path: src/datasets/dataset_dict.py
prompt:
def push_to_hub( self, repo_id, config_name: str = "default", set_default: Optional[bool] = None, data_dir: Optional[str] = None, commit_message: Optional[str] = None, commit_description: Optional[str] = None, private: Optional[bool] = False, token: Optional[str] = None, revision: Optional[str] = None, create_pr: Optional[bool] = False, max_shard_size: Optional[Union[int, str]] = None, num_shards: Optional[Dict[str, int]] = None, embed_external_files: bool = True, ) -> CommitInfo: """Pushes the [`DatasetDict`] to the hub as a Parquet dataset. The [`DatasetDict`] is pushed using HTTP requests and does not need to have neither git or git-lfs installed. Each dataset split will be pushed independently. The pushed dataset will keep the original split names. The resulting Parquet files are self-contained by default: if your dataset contains [`Image`] or [`Audio`] data, the Parquet files will store the bytes of your images or audio files. You can disable this by setting `embed_external_files` to False. Args: repo_id (`str`): The ID of the repository to push to in the following format: `<user>/<dataset_name>` or `<org>/<dataset_name>`. Also accepts `<dataset_name>`, which will default to the namespace of the logged-in user. config_name (`str`): Configuration name of a dataset. Defaults to "default". set_default (`bool`, *optional*): Whether to set this configuration as the default one. Otherwise, the default configuration is the one named "default". data_dir (`str`, *optional*): Directory name that will contain the uploaded data files. Defaults to the `config_name` if different from "default", else "data". <Added version="2.17.0"/> commit_message (`str`, *optional*): Message to commit while pushing. Will default to `"Upload dataset"`. commit_description (`str`, *optional*): Description of the commit that will be created. Additionally, description of the PR if a PR is created (`create_pr` is True). <Added version="2.16.0"/> private (`bool`, *optional*): Whether the dataset repository should be set to private or not. Only affects repository creation: a repository that already exists will not be affected by that parameter. token (`str`, *optional*): An optional authentication token for the Hugging Face Hub. If no token is passed, will default to the token saved locally when logging in with `huggingface-cli login`. Will raise an error if no token is passed and the user is not logged-in. revision (`str`, *optional*): Branch to push the uploaded files to. Defaults to the `"main"` branch. <Added version="2.15.0"/> create_pr (`bool`, *optional*, defaults to `False`): Whether to create a PR with the uploaded files or directly commit. <Added version="2.15.0"/> max_shard_size (`int` or `str`, *optional*, defaults to `"500MB"`): The maximum size of the dataset shards to be uploaded to the hub. If expressed as a string, needs to be digits followed by a unit (like `"500MB"` or `"1GB"`). num_shards (`Dict[str, int]`, *optional*): Number of shards to write. By default, the number of shards depends on `max_shard_size`. Use a dictionary to define a different num_shards for each split. <Added version="2.8.0"/> embed_external_files (`bool`, defaults to `True`): Whether to embed file bytes in the shards. In particular, this will do the following before the push for the fields of type: - [`Audio`] and [`Image`] removes local path information and embed file content in the Parquet files. 
Return: huggingface_hub.CommitInfo Example: ```python >>> dataset_dict.push_to_hub("<organization>/<dataset_id>") >>> dataset_dict.push_to_hub("<organization>/<dataset_id>", private=True) >>> dataset_dict.push_to_hub("<organization>/<dataset_id>", max_shard_size="1GB") >>> dataset_dict.push_to_hub("<organization>/<dataset_id>", num_shards={"train": 1024, "test": 8}) ``` If you want to add a new configuration (or subset) to a dataset (e.g. if the dataset has multiple tasks/versions/languages): ```python >>> english_dataset.push_to_hub("<organization>/<dataset_id>", "en") >>> french_dataset.push_to_hub("<organization>/<dataset_id>", "fr") >>> # later >>> english_dataset = load_dataset("<organization>/<dataset_id>", "en") >>> french_dataset = load_dataset("<organization>/<dataset_id>", "fr") ``` """
relavent_test_path: /usr/src/app/target_test_cases/failed_tests_DatasetDict.push_to_hub.txt
full_function:
def push_to_hub( self, repo_id, config_name: str = "default", set_default: Optional[bool] = None, data_dir: Optional[str] = None, commit_message: Optional[str] = None, commit_description: Optional[str] = None, private: Optional[bool] = False, token: Optional[str] = None, revision: Optional[str] = None, create_pr: Optional[bool] = False, max_shard_size: Optional[Union[int, str]] = None, num_shards: Optional[Dict[str, int]] = None, embed_external_files: bool = True, ) -> CommitInfo: """Pushes the [`DatasetDict`] to the hub as a Parquet dataset. The [`DatasetDict`] is pushed using HTTP requests and does not need to have neither git or git-lfs installed. Each dataset split will be pushed independently. The pushed dataset will keep the original split names. The resulting Parquet files are self-contained by default: if your dataset contains [`Image`] or [`Audio`] data, the Parquet files will store the bytes of your images or audio files. You can disable this by setting `embed_external_files` to False. Args: repo_id (`str`): The ID of the repository to push to in the following format: `<user>/<dataset_name>` or `<org>/<dataset_name>`. Also accepts `<dataset_name>`, which will default to the namespace of the logged-in user. config_name (`str`): Configuration name of a dataset. Defaults to "default". set_default (`bool`, *optional*): Whether to set this configuration as the default one. Otherwise, the default configuration is the one named "default". data_dir (`str`, *optional*): Directory name that will contain the uploaded data files. Defaults to the `config_name` if different from "default", else "data". <Added version="2.17.0"/> commit_message (`str`, *optional*): Message to commit while pushing. Will default to `"Upload dataset"`. commit_description (`str`, *optional*): Description of the commit that will be created. Additionally, description of the PR if a PR is created (`create_pr` is True). <Added version="2.16.0"/> private (`bool`, *optional*): Whether the dataset repository should be set to private or not. Only affects repository creation: a repository that already exists will not be affected by that parameter. token (`str`, *optional*): An optional authentication token for the Hugging Face Hub. If no token is passed, will default to the token saved locally when logging in with `huggingface-cli login`. Will raise an error if no token is passed and the user is not logged-in. revision (`str`, *optional*): Branch to push the uploaded files to. Defaults to the `"main"` branch. <Added version="2.15.0"/> create_pr (`bool`, *optional*, defaults to `False`): Whether to create a PR with the uploaded files or directly commit. <Added version="2.15.0"/> max_shard_size (`int` or `str`, *optional*, defaults to `"500MB"`): The maximum size of the dataset shards to be uploaded to the hub. If expressed as a string, needs to be digits followed by a unit (like `"500MB"` or `"1GB"`). num_shards (`Dict[str, int]`, *optional*): Number of shards to write. By default, the number of shards depends on `max_shard_size`. Use a dictionary to define a different num_shards for each split. <Added version="2.8.0"/> embed_external_files (`bool`, defaults to `True`): Whether to embed file bytes in the shards. In particular, this will do the following before the push for the fields of type: - [`Audio`] and [`Image`] removes local path information and embed file content in the Parquet files. 
Return: huggingface_hub.CommitInfo Example: ```python >>> dataset_dict.push_to_hub("<organization>/<dataset_id>") >>> dataset_dict.push_to_hub("<organization>/<dataset_id>", private=True) >>> dataset_dict.push_to_hub("<organization>/<dataset_id>", max_shard_size="1GB") >>> dataset_dict.push_to_hub("<organization>/<dataset_id>", num_shards={"train": 1024, "test": 8}) ``` If you want to add a new configuration (or subset) to a dataset (e.g. if the dataset has multiple tasks/versions/languages): ```python >>> english_dataset.push_to_hub("<organization>/<dataset_id>", "en") >>> french_dataset.push_to_hub("<organization>/<dataset_id>", "fr") >>> # later >>> english_dataset = load_dataset("<organization>/<dataset_id>", "en") >>> french_dataset = load_dataset("<organization>/<dataset_id>", "fr") ``` """ if num_shards is None: num_shards = {k: None for k in self} elif not isinstance(num_shards, dict): raise ValueError( "Please provide one `num_shards` per dataset in the dataset dictionary, e.g. {{'train': 128, 'test': 4}}" ) self._check_values_type() self._check_values_features() total_uploaded_size = 0 total_dataset_nbytes = 0 info_to_dump: DatasetInfo = next(iter(self.values())).info.copy() info_to_dump.config_name = config_name info_to_dump.splits = SplitDict() for split in self.keys(): if not re.match(_split_re, split): raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.") api = HfApi(endpoint=config.HF_ENDPOINT, token=token) repo_url = api.create_repo( repo_id, token=token, repo_type="dataset", private=private, exist_ok=True, ) repo_id = repo_url.repo_id if revision is not None and not revision.startswith("refs/pr/"): # We do not call create_branch for a PR reference: 400 Bad Request api.create_branch(repo_id, branch=revision, token=token, repo_type="dataset", exist_ok=True) if not data_dir: data_dir = config_name if config_name != "default" else "data" # for backward compatibility additions = [] for split in self.keys(): logger.info(f"Pushing split {split} to the Hub.") # The split=key needs to be removed before merging split_additions, uploaded_size, dataset_nbytes = self[split]._push_parquet_shards_to_hub( repo_id, data_dir=data_dir, split=split, token=token, revision=revision, create_pr=create_pr, max_shard_size=max_shard_size, num_shards=num_shards.get(split), embed_external_files=embed_external_files, ) additions += split_additions total_uploaded_size += uploaded_size total_dataset_nbytes += dataset_nbytes info_to_dump.splits[split] = SplitInfo(str(split), num_bytes=dataset_nbytes, num_examples=len(self[split])) info_to_dump.download_checksums = None info_to_dump.download_size = total_uploaded_size info_to_dump.dataset_size = total_dataset_nbytes info_to_dump.size_in_bytes = total_uploaded_size + total_dataset_nbytes # Check if the repo already has a README.md and/or a dataset_infos.json to update them with the new split info (size and pattern) # and delete old split shards (if they exist) repo_with_dataset_card, repo_with_dataset_infos = False, False repo_splits = [] # use a list to keep the order of the splits deletions = [] repo_files_to_add = [addition.path_in_repo for addition in additions] for repo_file in api.list_repo_tree( repo_id=repo_id, revision=revision, repo_type="dataset", token=token, recursive=True ): if not isinstance(repo_file, RepoFile): continue if repo_file.rfilename == config.REPOCARD_FILENAME: repo_with_dataset_card = True elif repo_file.rfilename == config.DATASETDICT_INFOS_FILENAME: repo_with_dataset_infos = True elif ( 
repo_file.rfilename.startswith(tuple(f"{data_dir}/{split}-" for split in self.keys())) and repo_file.rfilename not in repo_files_to_add ): deletions.append(CommitOperationDelete(path_in_repo=repo_file.rfilename)) elif fnmatch.fnmatch( repo_file.rfilename, PUSH_TO_HUB_WITHOUT_METADATA_CONFIGS_SPLIT_PATTERN_SHARDED.replace("{split}", "*") ): repo_split = string_to_dict( repo_file.rfilename, glob_pattern_to_regex(PUSH_TO_HUB_WITHOUT_METADATA_CONFIGS_SPLIT_PATTERN_SHARDED), )["split"] if repo_split not in repo_splits: repo_splits.append(split) # get the info from the README to update them if repo_with_dataset_card: dataset_card_path = api.hf_hub_download( repo_id, config.REPOCARD_FILENAME, repo_type="dataset", revision=revision ) dataset_card = DatasetCard.load(Path(dataset_card_path)) dataset_card_data = dataset_card.data metadata_configs = MetadataConfigs.from_dataset_card_data(dataset_card_data) # get the deprecated dataset_infos.json to update them elif repo_with_dataset_infos: dataset_card = None dataset_card_data = DatasetCardData() metadata_configs = MetadataConfigs() else: dataset_card = None dataset_card_data = DatasetCardData() metadata_configs = MetadataConfigs() # create the metadata configs if it was uploaded with push_to_hub before metadata configs existed if not metadata_configs and repo_splits: default_metadata_configs_to_dump = { "data_files": [{"split": split, "path": f"data/{split}-*"} for split in repo_splits] } MetadataConfigs({"default": default_metadata_configs_to_dump}).to_dataset_card_data(dataset_card_data) metadata_config_to_dump = { "data_files": [{"split": split, "path": f"{data_dir}/{split}-*"} for split in self.keys()], } if set_default and config_name != "default": if metadata_configs: default_config_name = metadata_configs.get_default_config_name() if default_config_name == "default": raise ValueError( "There exists a configuration named 'default'. To set a different configuration as default, " "rename the 'default' one first." ) else: _ = metadata_configs[default_config_name].pop("default") metadata_config_to_dump["default"] = True # push to the deprecated dataset_infos.json if repo_with_dataset_infos: dataset_infos_path = api.hf_hub_download( repo_id, config.DATASETDICT_INFOS_FILENAME, repo_type="dataset", revision=revision ) with open(dataset_infos_path, encoding="utf-8") as f: dataset_infos: dict = json.load(f) dataset_infos[config_name] = asdict(info_to_dump) buffer = BytesIO() buffer.write(json.dumps(dataset_infos, indent=4).encode("utf-8")) additions.append( CommitOperationAdd(path_in_repo=config.DATASETDICT_INFOS_FILENAME, path_or_fileobj=buffer) ) # push to README DatasetInfosDict({config_name: info_to_dump}).to_dataset_card_data(dataset_card_data) MetadataConfigs({config_name: metadata_config_to_dump}).to_dataset_card_data(dataset_card_data) dataset_card = DatasetCard(f"---\n{dataset_card_data}\n---\n") if dataset_card is None else dataset_card additions.append( CommitOperationAdd(path_in_repo=config.REPOCARD_FILENAME, path_or_fileobj=str(dataset_card).encode()) ) commit_message = commit_message if commit_message is not None else "Upload dataset" if len(additions) <= config.UPLOADS_MAX_NUMBER_PER_COMMIT: commit_info = api.create_commit( repo_id, operations=additions + deletions, commit_message=commit_message, commit_description=commit_description, token=token, repo_type="dataset", revision=revision, create_pr=create_pr, ) else: logger.info( f"Number of files to upload is larger than {config.UPLOADS_MAX_NUMBER_PER_COMMIT}. 
Splitting the push into multiple commits." ) num_commits = math.ceil(len(additions) / config.UPLOADS_MAX_NUMBER_PER_COMMIT) for i in range(0, num_commits): operations = additions[ i * config.UPLOADS_MAX_NUMBER_PER_COMMIT : (i + 1) * config.UPLOADS_MAX_NUMBER_PER_COMMIT ] + (deletions if i == 0 else []) commit_info = api.create_commit( repo_id, operations=operations, commit_message=commit_message + f" (part {i:05d}-of-{num_commits:05d})", commit_description=commit_description, token=token, repo_type="dataset", revision=revision, create_pr=create_pr, ) logger.info( f"Commit #{i+1} completed" + (f" (still {num_commits - i - 1} to go)" if num_commits - i - 1 else "") + "." ) return commit_info
function_name: DatasetDict.push_to_hub

repository: datasets
repo_id: 5
target_module_path: src/datasets/dataset_dict.py
prompt:
def save_to_disk( self, dataset_dict_path: PathLike, max_shard_size: Optional[Union[str, int]] = None, num_shards: Optional[Dict[str, int]] = None, num_proc: Optional[int] = None, storage_options: Optional[dict] = None, ): """ Saves a dataset dict to a filesystem using `fsspec.spec.AbstractFileSystem`. For [`Image`] and [`Audio`] data: All the Image() and Audio() data are stored in the arrow files. If you want to store paths or urls, please use the Value("string") type. Args: dataset_dict_path (`path-like`): Path (e.g. `dataset/train`) or remote URI (e.g. `s3://my-bucket/dataset/train`) of the dataset dict directory where the dataset dict will be saved to. max_shard_size (`int` or `str`, *optional*, defaults to `"500MB"`): The maximum size of the dataset shards to be uploaded to the hub. If expressed as a string, needs to be digits followed by a unit (like `"50MB"`). num_shards (`Dict[str, int]`, *optional*): Number of shards to write. By default the number of shards depends on `max_shard_size` and `num_proc`. You need to provide the number of shards for each dataset in the dataset dictionary. Use a dictionary to define a different num_shards for each split. <Added version="2.8.0"/> num_proc (`int`, *optional*, default `None`): Number of processes when downloading and generating the dataset locally. Multiprocessing is disabled by default. <Added version="2.8.0"/> storage_options (`dict`, *optional*): Key/value pairs to be passed on to the file-system backend, if any. <Added version="2.8.0"/> Example: ```python >>> dataset_dict.save_to_disk("path/to/dataset/directory") >>> dataset_dict.save_to_disk("path/to/dataset/directory", max_shard_size="1GB") >>> dataset_dict.save_to_disk("path/to/dataset/directory", num_shards={"train": 1024, "test": 8}) ``` """
relavent_test_path: /usr/src/app/target_test_cases/failed_tests_DatasetDict.save_to_disk.txt
full_function:
def save_to_disk( self, dataset_dict_path: PathLike, max_shard_size: Optional[Union[str, int]] = None, num_shards: Optional[Dict[str, int]] = None, num_proc: Optional[int] = None, storage_options: Optional[dict] = None, ): """ Saves a dataset dict to a filesystem using `fsspec.spec.AbstractFileSystem`. For [`Image`] and [`Audio`] data: All the Image() and Audio() data are stored in the arrow files. If you want to store paths or urls, please use the Value("string") type. Args: dataset_dict_path (`path-like`): Path (e.g. `dataset/train`) or remote URI (e.g. `s3://my-bucket/dataset/train`) of the dataset dict directory where the dataset dict will be saved to. max_shard_size (`int` or `str`, *optional*, defaults to `"500MB"`): The maximum size of the dataset shards to be uploaded to the hub. If expressed as a string, needs to be digits followed by a unit (like `"50MB"`). num_shards (`Dict[str, int]`, *optional*): Number of shards to write. By default the number of shards depends on `max_shard_size` and `num_proc`. You need to provide the number of shards for each dataset in the dataset dictionary. Use a dictionary to define a different num_shards for each split. <Added version="2.8.0"/> num_proc (`int`, *optional*, default `None`): Number of processes when downloading and generating the dataset locally. Multiprocessing is disabled by default. <Added version="2.8.0"/> storage_options (`dict`, *optional*): Key/value pairs to be passed on to the file-system backend, if any. <Added version="2.8.0"/> Example: ```python >>> dataset_dict.save_to_disk("path/to/dataset/directory") >>> dataset_dict.save_to_disk("path/to/dataset/directory", max_shard_size="1GB") >>> dataset_dict.save_to_disk("path/to/dataset/directory", num_shards={"train": 1024, "test": 8}) ``` """ fs: fsspec.AbstractFileSystem fs, _ = url_to_fs(dataset_dict_path, **(storage_options or {})) if num_shards is None: num_shards = {k: None for k in self} elif not isinstance(num_shards, dict): raise ValueError( "Please provide one `num_shards` per dataset in the dataset dictionary, e.g. {{'train': 128, 'test': 4}}" ) fs.makedirs(dataset_dict_path, exist_ok=True) with fs.open(posixpath.join(dataset_dict_path, config.DATASETDICT_JSON_FILENAME), "w", encoding="utf-8") as f: json.dump({"splits": list(self)}, f) for k, dataset in self.items(): dataset.save_to_disk( posixpath.join(dataset_dict_path, k), num_shards=num_shards.get(k), max_shard_size=max_shard_size, num_proc=num_proc, storage_options=storage_options, )
function_name: DatasetDict.save_to_disk

repository: datasets
repo_id: 6
target_module_path: src/datasets/info.py
prompt:
def write_to_directory(self, dataset_info_dir, pretty_print=False, storage_options: Optional[dict] = None): """Write `DatasetInfo` and license (if present) as JSON files to `dataset_info_dir`. Args: dataset_info_dir (`str`): Destination directory. pretty_print (`bool`, defaults to `False`): If `True`, the JSON will be pretty-printed with the indent level of 4. storage_options (`dict`, *optional*): Key/value pairs to be passed on to the file-system backend, if any. <Added version="2.9.0"/> Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds.info.write_to_directory("/path/to/directory/") ``` """
relavent_test_path: /usr/src/app/target_test_cases/failed_tests_DatasetInfo.write_to_directory.txt
full_function:
def write_to_directory(self, dataset_info_dir, pretty_print=False, storage_options: Optional[dict] = None): """Write `DatasetInfo` and license (if present) as JSON files to `dataset_info_dir`. Args: dataset_info_dir (`str`): Destination directory. pretty_print (`bool`, defaults to `False`): If `True`, the JSON will be pretty-printed with the indent level of 4. storage_options (`dict`, *optional*): Key/value pairs to be passed on to the file-system backend, if any. <Added version="2.9.0"/> Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds.info.write_to_directory("/path/to/directory/") ``` """ fs: fsspec.AbstractFileSystem fs, *_ = url_to_fs(dataset_info_dir, **(storage_options or {})) with fs.open(posixpath.join(dataset_info_dir, config.DATASET_INFO_FILENAME), "wb") as f: self._dump_info(f, pretty_print=pretty_print) if self.license: with fs.open(posixpath.join(dataset_info_dir, config.LICENSE_FILENAME), "wb") as f: self._dump_license(f)
function_name: DatasetInfo.write_to_directory

repository: datasets
repo_id: 7
target_module_path: src/datasets/download/download_manager.py
prompt:
def download(self, url_or_urls): """Download given URL(s). By default, only one process is used for download. Pass customized `download_config.num_proc` to change this behavior. Args: url_or_urls (`str` or `list` or `dict`): URL or `list` or `dict` of URLs to download. Each URL is a `str`. Returns: `str` or `list` or `dict`: The downloaded paths matching the given input `url_or_urls`. Example: ```py >>> downloaded_files = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz') ``` """
relavent_test_path: /usr/src/app/target_test_cases/failed_tests_DownloadManager.download.txt
full_function:
def download(self, url_or_urls): """Download given URL(s). By default, only one process is used for download. Pass customized `download_config.num_proc` to change this behavior. Args: url_or_urls (`str` or `list` or `dict`): URL or `list` or `dict` of URLs to download. Each URL is a `str`. Returns: `str` or `list` or `dict`: The downloaded paths matching the given input `url_or_urls`. Example: ```py >>> downloaded_files = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz') ``` """ download_config = self.download_config.copy() download_config.extract_compressed_file = False if download_config.download_desc is None: download_config.download_desc = "Downloading data" download_func = partial(self._download_batched, download_config=download_config) start_time = datetime.now() with stack_multiprocessing_download_progress_bars(): downloaded_path_or_paths = map_nested( download_func, url_or_urls, map_tuple=True, num_proc=download_config.num_proc, desc="Downloading data files", batched=True, batch_size=-1, ) duration = datetime.now() - start_time logger.info(f"Downloading took {duration.total_seconds() // 60} min") url_or_urls = NestedDataStructure(url_or_urls) downloaded_path_or_paths = NestedDataStructure(downloaded_path_or_paths) self.downloaded_paths.update(dict(zip(url_or_urls.flatten(), downloaded_path_or_paths.flatten()))) start_time = datetime.now() self._record_sizes_checksums(url_or_urls, downloaded_path_or_paths) duration = datetime.now() - start_time logger.info(f"Checksum Computation took {duration.total_seconds() // 60} min") return downloaded_path_or_paths.data
function_name: DownloadManager.download

repository: datasets
repo_id: 8
target_module_path: src/datasets/download/download_manager.py
prompt:
def extract(self, path_or_paths): """Extract given path(s). Args: path_or_paths (path or `list` or `dict`): Path of file to extract. Each path is a `str`. Returns: extracted_path(s): `str`, The extracted paths matching the given input path_or_paths. Example: ```py >>> downloaded_files = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz') >>> extracted_files = dl_manager.extract(downloaded_files) ``` """
relavent_test_path: /usr/src/app/target_test_cases/failed_tests_DownloadManager.extract.txt
full_function:
def extract(self, path_or_paths): """Extract given path(s). Args: path_or_paths (path or `list` or `dict`): Path of file to extract. Each path is a `str`. Returns: extracted_path(s): `str`, The extracted paths matching the given input path_or_paths. Example: ```py >>> downloaded_files = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz') >>> extracted_files = dl_manager.extract(downloaded_files) ``` """ download_config = self.download_config.copy() download_config.extract_compressed_file = True extract_func = partial(self._download_single, download_config=download_config) extracted_paths = map_nested( extract_func, path_or_paths, num_proc=download_config.num_proc, desc="Extracting data files", ) path_or_paths = NestedDataStructure(path_or_paths) extracted_paths = NestedDataStructure(extracted_paths) self.extracted_paths.update(dict(zip(path_or_paths.flatten(), extracted_paths.flatten()))) return extracted_paths.data
function_name: DownloadManager.extract

repository: datasets
repo_id: 9
target_module_path: src/datasets/download/download_manager.py
prompt:
def iter_archive(self, path_or_buf: Union[str, io.BufferedReader]): """Iterate over files within an archive. Args: path_or_buf (`str` or `io.BufferedReader`): Archive path or archive binary file object. Yields: `tuple[str, io.BufferedReader]`: 2-tuple (path_within_archive, file_object). File object is opened in binary mode. Example: ```py >>> archive = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz') >>> files = dl_manager.iter_archive(archive) ``` """
relavent_test_path: /usr/src/app/target_test_cases/failed_tests_DownloadManager.iter_archive.txt
full_function:
def iter_archive(self, path_or_buf: Union[str, io.BufferedReader]): """Iterate over files within an archive. Args: path_or_buf (`str` or `io.BufferedReader`): Archive path or archive binary file object. Yields: `tuple[str, io.BufferedReader]`: 2-tuple (path_within_archive, file_object). File object is opened in binary mode. Example: ```py >>> archive = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz') >>> files = dl_manager.iter_archive(archive) ``` """ if hasattr(path_or_buf, "read"): return ArchiveIterable.from_buf(path_or_buf) else: return ArchiveIterable.from_urlpath(path_or_buf)
function_name: DownloadManager.iter_archive

repository: datasets
repo_id: 10
target_module_path: src/datasets/download/download_manager.py
prompt:
def iter_files(self, paths: Union[str, List[str]]): """Iterate over file paths. Args: paths (`str` or `list` of `str`): Root paths. Yields: `str`: File path. Example: ```py >>> files = dl_manager.download_and_extract('https://huggingface.co/datasets/beans/resolve/main/data/train.zip') >>> files = dl_manager.iter_files(files) ``` """
relavent_test_path: /usr/src/app/target_test_cases/failed_tests_DownloadManager.iter_files.txt
full_function:
def iter_files(self, paths: Union[str, List[str]]): """Iterate over file paths. Args: paths (`str` or `list` of `str`): Root paths. Yields: `str`: File path. Example: ```py >>> files = dl_manager.download_and_extract('https://huggingface.co/datasets/beans/resolve/main/data/train.zip') >>> files = dl_manager.iter_files(files) ``` """ return FilesIterable.from_urlpaths(paths)
function_name: DownloadManager.iter_files

repository: datasets
repo_id: 11
target_module_path: src/datasets/features/features.py
prompt:
def copy(self) -> "Features": """ Make a deep copy of [`Features`]. Returns: [`Features`] Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train") >>> copy_of_features = ds.features.copy() >>> copy_of_features {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None), 'text': Value(dtype='string', id=None)} ``` """
relavent_test_path: /usr/src/app/target_test_cases/failed_tests_Features.copy.txt
full_function:
def copy(self) -> "Features": """ Make a deep copy of [`Features`]. Returns: [`Features`] Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train") >>> copy_of_features = ds.features.copy() >>> copy_of_features {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None), 'text': Value(dtype='string', id=None)} ``` """ return copy.deepcopy(self)
function_name: Features.copy

repository: datasets
repo_id: 12
target_module_path: src/datasets/features/features.py
prompt:
def encode_column(self, column, column_name: str): """ Encode column into a format for Arrow. Args: column (`list[Any]`): Data in a Dataset column. column_name (`str`): Dataset column name. Returns: `list[Any]` """
relavent_test_path: /usr/src/app/target_test_cases/failed_tests_Features.encode_column.txt
full_function:
def encode_column(self, column, column_name: str): """ Encode column into a format for Arrow. Args: column (`list[Any]`): Data in a Dataset column. column_name (`str`): Dataset column name. Returns: `list[Any]` """ column = cast_to_python_objects(column) return [encode_nested_example(self[column_name], obj, level=1) for obj in column]
function_name: Features.encode_column

repository: datasets
repo_id: 13
target_module_path: src/datasets/features/features.py
prompt:
def flatten(self, max_depth=16) -> "Features": """Flatten the features. Every dictionary column is removed and is replaced by all the subfields it contains. The new fields are named by concatenating the name of the original column and the subfield name like this: `<original>.<subfield>`. If a column contains nested dictionaries, then all the lower-level subfields names are also concatenated to form new columns: `<original>.<subfield>.<subsubfield>`, etc. Returns: [`Features`]: The flattened features. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("squad", split="train") >>> ds.features.flatten() {'answers.answer_start': Sequence(feature=Value(dtype='int32', id=None), length=-1, id=None), 'answers.text': Sequence(feature=Value(dtype='string', id=None), length=-1, id=None), 'context': Value(dtype='string', id=None), 'id': Value(dtype='string', id=None), 'question': Value(dtype='string', id=None), 'title': Value(dtype='string', id=None)} ``` """
relavent_test_path: /usr/src/app/target_test_cases/failed_tests_Features.flatten.txt
full_function:
def flatten(self, max_depth=16) -> "Features": """Flatten the features. Every dictionary column is removed and is replaced by all the subfields it contains. The new fields are named by concatenating the name of the original column and the subfield name like this: `<original>.<subfield>`. If a column contains nested dictionaries, then all the lower-level subfields names are also concatenated to form new columns: `<original>.<subfield>.<subsubfield>`, etc. Returns: [`Features`]: The flattened features. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("squad", split="train") >>> ds.features.flatten() {'answers.answer_start': Sequence(feature=Value(dtype='int32', id=None), length=-1, id=None), 'answers.text': Sequence(feature=Value(dtype='string', id=None), length=-1, id=None), 'context': Value(dtype='string', id=None), 'id': Value(dtype='string', id=None), 'question': Value(dtype='string', id=None), 'title': Value(dtype='string', id=None)} ``` """ for depth in range(1, max_depth): no_change = True flattened = self.copy() for column_name, subfeature in self.items(): if isinstance(subfeature, dict): no_change = False flattened.update({f"{column_name}.{k}": v for k, v in subfeature.items()}) del flattened[column_name] elif isinstance(subfeature, Sequence) and isinstance(subfeature.feature, dict): no_change = False flattened.update( { f"{column_name}.{k}": Sequence(v) if not isinstance(v, dict) else [v] for k, v in subfeature.feature.items() } ) del flattened[column_name] elif hasattr(subfeature, "flatten") and subfeature.flatten() != subfeature: no_change = False flattened.update({f"{column_name}.{k}": v for k, v in subfeature.flatten().items()}) del flattened[column_name] self = flattened if no_change: break return self
function_name: Features.flatten

repository: datasets
repo_id: 14
target_module_path: src/datasets/features/features.py
prompt:
def reorder_fields_as(self, other: "Features") -> "Features": """ Reorder Features fields to match the field order of other [`Features`]. The order of the fields is important since it matters for the underlying arrow data. Re-ordering the fields allows to make the underlying arrow data type match. Args: other ([`Features`]): The other [`Features`] to align with. Returns: [`Features`] Example:: >>> from datasets import Features, Sequence, Value >>> # let's say we have two features with a different order of nested fields (for a and b for example) >>> f1 = Features({"root": Sequence({"a": Value("string"), "b": Value("string")})}) >>> f2 = Features({"root": {"b": Sequence(Value("string")), "a": Sequence(Value("string"))}}) >>> assert f1.type != f2.type >>> # re-ordering keeps the base structure (here Sequence is defined at the root level), but makes the fields order match >>> f1.reorder_fields_as(f2) {'root': Sequence(feature={'b': Value(dtype='string', id=None), 'a': Value(dtype='string', id=None)}, length=-1, id=None)} >>> assert f1.reorder_fields_as(f2).type == f2.type """
relavent_test_path: /usr/src/app/target_test_cases/failed_tests_Features.reorder_fields_as.txt
full_function:
def reorder_fields_as(self, other: "Features") -> "Features": """ Reorder Features fields to match the field order of other [`Features`]. The order of the fields is important since it matters for the underlying arrow data. Re-ordering the fields allows to make the underlying arrow data type match. Args: other ([`Features`]): The other [`Features`] to align with. Returns: [`Features`] Example:: >>> from datasets import Features, Sequence, Value >>> # let's say we have two features with a different order of nested fields (for a and b for example) >>> f1 = Features({"root": Sequence({"a": Value("string"), "b": Value("string")})}) >>> f2 = Features({"root": {"b": Sequence(Value("string")), "a": Sequence(Value("string"))}}) >>> assert f1.type != f2.type >>> # re-ordering keeps the base structure (here Sequence is defined at the root level), but makes the fields order match >>> f1.reorder_fields_as(f2) {'root': Sequence(feature={'b': Value(dtype='string', id=None), 'a': Value(dtype='string', id=None)}, length=-1, id=None)} >>> assert f1.reorder_fields_as(f2).type == f2.type """ def recursive_reorder(source, target, stack=""): stack_position = " at " + stack[1:] if stack else "" if isinstance(target, Sequence): target = target.feature if isinstance(target, dict): target = {k: [v] for k, v in target.items()} else: target = [target] if isinstance(source, Sequence): sequence_kwargs = vars(source).copy() source = sequence_kwargs.pop("feature") if isinstance(source, dict): source = {k: [v] for k, v in source.items()} reordered = recursive_reorder(source, target, stack) return Sequence({k: v[0] for k, v in reordered.items()}, **sequence_kwargs) else: source = [source] reordered = recursive_reorder(source, target, stack) return Sequence(reordered[0], **sequence_kwargs) elif isinstance(source, dict): if not isinstance(target, dict): raise ValueError(f"Type mismatch: between {source} and {target}" + stack_position) if sorted(source) != sorted(target): message = ( f"Keys mismatch: between {source} (source) and {target} (target).\n" f"{source.keys()-target.keys()} are missing from target " f"and {target.keys()-source.keys()} are missing from source" + stack_position ) raise ValueError(message) return {key: recursive_reorder(source[key], target[key], stack + f".{key}") for key in target} elif isinstance(source, list): if not isinstance(target, list): raise ValueError(f"Type mismatch: between {source} and {target}" + stack_position) if len(source) != len(target): raise ValueError(f"Length mismatch: between {source} and {target}" + stack_position) return [recursive_reorder(source[i], target[i], stack + ".<list>") for i in range(len(target))] elif isinstance(source, LargeList): if not isinstance(target, LargeList): raise ValueError(f"Type mismatch: between {source} and {target}" + stack_position) return LargeList(recursive_reorder(source.feature, target.feature, stack)) else: return source return Features(recursive_reorder(self, other))
function_name: Features.reorder_fields_as

repository: datasets
repo_id: 15
target_module_path: src/datasets/table.py
prompt:
def cast(self, *args, **kwargs): """ Cast table values to another schema. Args: target_schema (`Schema`): Schema to cast to, the names and order of fields must match. safe (`bool`, defaults to `True`): Check for overflows or other unsafe conversions. Returns: `datasets.table.Table` """
relavent_test_path: /usr/src/app/target_test_cases/failed_tests_InMemoryTable.cast.txt
full_function:
def cast(self, *args, **kwargs): """ Cast table values to another schema. Args: target_schema (`Schema`): Schema to cast to, the names and order of fields must match. safe (`bool`, defaults to `True`): Check for overflows or other unsafe conversions. Returns: `datasets.table.Table` """ return InMemoryTable(table_cast(self.table, *args, **kwargs))
function_name: InMemoryTable.cast

repository: datasets
repo_id: 16
target_module_path: src/datasets/table.py
prompt:
def slice(self, offset=0, length=None): """ Compute zero-copy slice of this Table. Args: offset (`int`, defaults to `0`): Offset from start of table to slice. length (`int`, defaults to `None`): Length of slice (default is until end of table starting from offset). Returns: `datasets.table.Table` """
relavent_test_path: /usr/src/app/target_test_cases/failed_tests_InMemoryTable.slice.txt
full_function:
def slice(self, offset=0, length=None): """ Compute zero-copy slice of this Table. Args: offset (`int`, defaults to `0`): Offset from start of table to slice. length (`int`, defaults to `None`): Length of slice (default is until end of table starting from offset). Returns: `datasets.table.Table` """ # Use fast slicing here return InMemoryTable(self.fast_slice(offset=offset, length=length))
function_name: InMemoryTable.slice

repository: datasets
repo_id: 17
target_module_path: src/datasets/iterable_dataset.py
prompt:
def cast( self, features: Features, ) -> "IterableDataset": """ Cast the dataset to a new set of features. Args: features ([`Features`]): New features to cast the dataset to. The name of the fields in the features must match the current column names. The type of the data must also be convertible from one type to the other. For non-trivial conversion, e.g. `string` <-> `ClassLabel` you should use [`~Dataset.map`] to update the Dataset. Returns: `IterableDataset`: A copy of the dataset with casted features. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True) >>> ds.features {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None), 'text': Value(dtype='string', id=None)} >>> new_features = ds.features.copy() >>> new_features["label"] = ClassLabel(names=["bad", "good"]) >>> new_features["text"] = Value("large_string") >>> ds = ds.cast(new_features) >>> ds.features {'label': ClassLabel(num_classes=2, names=['bad', 'good'], id=None), 'text': Value(dtype='large_string', id=None)} ``` """
relavent_test_path: /usr/src/app/target_test_cases/failed_tests_IterableDataset.cast.txt
full_function:
def cast( self, features: Features, ) -> "IterableDataset": """ Cast the dataset to a new set of features. Args: features ([`Features`]): New features to cast the dataset to. The name of the fields in the features must match the current column names. The type of the data must also be convertible from one type to the other. For non-trivial conversion, e.g. `string` <-> `ClassLabel` you should use [`~Dataset.map`] to update the Dataset. Returns: `IterableDataset`: A copy of the dataset with casted features. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True) >>> ds.features {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None), 'text': Value(dtype='string', id=None)} >>> new_features = ds.features.copy() >>> new_features["label"] = ClassLabel(names=["bad", "good"]) >>> new_features["text"] = Value("large_string") >>> ds = ds.cast(new_features) >>> ds.features {'label': ClassLabel(num_classes=2, names=['bad', 'good'], id=None), 'text': Value(dtype='large_string', id=None)} ``` """ info = self._info.copy() info.features = features return IterableDataset( ex_iterable=self._ex_iterable, info=info, split=self._split, formatting=self._formatting, shuffling=copy.deepcopy(self._shuffling), distributed=copy.deepcopy(self._distributed), token_per_repo_id=self._token_per_repo_id, )
function_name: IterableDataset.cast

repository: datasets
repo_id: 18
target_module_path: src/datasets/iterable_dataset.py
prompt:
def cast_column(self, column: str, feature: FeatureType) -> "IterableDataset": """Cast column to feature for decoding. Args: column (`str`): Column name. feature (`Feature`): Target feature. Returns: `IterableDataset` Example: ```py >>> from datasets import load_dataset, Audio >>> ds = load_dataset("PolyAI/minds14", name="en-US", split="train", streaming=True) >>> ds.features {'audio': Audio(sampling_rate=8000, mono=True, decode=True, id=None), 'english_transcription': Value(dtype='string', id=None), 'intent_class': ClassLabel(num_classes=14, names=['abroad', 'address', 'app_error', 'atm_limit', 'balance', 'business_loan', 'card_issues', 'cash_deposit', 'direct_debit', 'freeze', 'high_value_payment', 'joint_account', 'latest_transactions', 'pay_bill'], id=None), 'lang_id': ClassLabel(num_classes=14, names=['cs-CZ', 'de-DE', 'en-AU', 'en-GB', 'en-US', 'es-ES', 'fr-FR', 'it-IT', 'ko-KR', 'nl-NL', 'pl-PL', 'pt-PT', 'ru-RU', 'zh-CN'], id=None), 'path': Value(dtype='string', id=None), 'transcription': Value(dtype='string', id=None)} >>> ds = ds.cast_column("audio", Audio(sampling_rate=16000)) >>> ds.features {'audio': Audio(sampling_rate=16000, mono=True, decode=True, id=None), 'english_transcription': Value(dtype='string', id=None), 'intent_class': ClassLabel(num_classes=14, names=['abroad', 'address', 'app_error', 'atm_limit', 'balance', 'business_loan', 'card_issues', 'cash_deposit', 'direct_debit', 'freeze', 'high_value_payment', 'joint_account', 'latest_transactions', 'pay_bill'], id=None), 'lang_id': ClassLabel(num_classes=14, names=['cs-CZ', 'de-DE', 'en-AU', 'en-GB', 'en-US', 'es-ES', 'fr-FR', 'it-IT', 'ko-KR', 'nl-NL', 'pl-PL', 'pt-PT', 'ru-RU', 'zh-CN'], id=None), 'path': Value(dtype='string', id=None), 'transcription': Value(dtype='string', id=None)} ``` """
relavent_test_path: /usr/src/app/target_test_cases/failed_tests_IterableDataset.cast_column.txt
full_function:
def cast_column(self, column: str, feature: FeatureType) -> "IterableDataset": """Cast column to feature for decoding. Args: column (`str`): Column name. feature (`Feature`): Target feature. Returns: `IterableDataset` Example: ```py >>> from datasets import load_dataset, Audio >>> ds = load_dataset("PolyAI/minds14", name="en-US", split="train", streaming=True) >>> ds.features {'audio': Audio(sampling_rate=8000, mono=True, decode=True, id=None), 'english_transcription': Value(dtype='string', id=None), 'intent_class': ClassLabel(num_classes=14, names=['abroad', 'address', 'app_error', 'atm_limit', 'balance', 'business_loan', 'card_issues', 'cash_deposit', 'direct_debit', 'freeze', 'high_value_payment', 'joint_account', 'latest_transactions', 'pay_bill'], id=None), 'lang_id': ClassLabel(num_classes=14, names=['cs-CZ', 'de-DE', 'en-AU', 'en-GB', 'en-US', 'es-ES', 'fr-FR', 'it-IT', 'ko-KR', 'nl-NL', 'pl-PL', 'pt-PT', 'ru-RU', 'zh-CN'], id=None), 'path': Value(dtype='string', id=None), 'transcription': Value(dtype='string', id=None)} >>> ds = ds.cast_column("audio", Audio(sampling_rate=16000)) >>> ds.features {'audio': Audio(sampling_rate=16000, mono=True, decode=True, id=None), 'english_transcription': Value(dtype='string', id=None), 'intent_class': ClassLabel(num_classes=14, names=['abroad', 'address', 'app_error', 'atm_limit', 'balance', 'business_loan', 'card_issues', 'cash_deposit', 'direct_debit', 'freeze', 'high_value_payment', 'joint_account', 'latest_transactions', 'pay_bill'], id=None), 'lang_id': ClassLabel(num_classes=14, names=['cs-CZ', 'de-DE', 'en-AU', 'en-GB', 'en-US', 'es-ES', 'fr-FR', 'it-IT', 'ko-KR', 'nl-NL', 'pl-PL', 'pt-PT', 'ru-RU', 'zh-CN'], id=None), 'path': Value(dtype='string', id=None), 'transcription': Value(dtype='string', id=None)} ``` """ info = self._info.copy() info.features[column] = feature return IterableDataset( ex_iterable=self._ex_iterable, info=info, split=self._split, formatting=self._formatting, shuffling=copy.deepcopy(self._shuffling), distributed=copy.deepcopy(self._distributed), token_per_repo_id=self._token_per_repo_id, )
function_name: IterableDataset.cast_column

repository: datasets
repo_id: 19
target_module_path: src/datasets/iterable_dataset.py
prompt:
def filter( self, function: Optional[Callable] = None, with_indices=False, input_columns: Optional[Union[str, List[str]]] = None, batched: bool = False, batch_size: Optional[int] = 1000, fn_kwargs: Optional[dict] = None, ) -> "IterableDataset": """Apply a filter function to all the elements so that the dataset only includes examples according to the filter function. The filtering is done on-the-fly when iterating over the dataset. Args: function (`Callable`): Callable with one of the following signatures: - `function(example: Dict[str, Any]) -> bool` if `with_indices=False, batched=False` - `function(example: Dict[str, Any], indices: int) -> bool` if `with_indices=True, batched=False` - `function(example: Dict[str, List]) -> List[bool]` if `with_indices=False, batched=True` - `function(example: Dict[str, List], indices: List[int]) -> List[bool]` if `with_indices=True, batched=True` If no function is provided, defaults to an always True function: `lambda x: True`. with_indices (`bool`, defaults to `False`): Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx): ...`. input_columns (`str` or `List[str]`, *optional*): The columns to be passed into `function` as positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument. batched (`bool`, defaults to `False`): Provide batch of examples to `function`. batch_size (`int`, *optional*, default `1000`): Number of examples per batch provided to `function` if `batched=True`. fn_kwargs (`Dict`, *optional*, default `None`): Keyword arguments to be passed to `function`. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True) >>> ds = ds.filter(lambda x: x["label"] == 0) >>> list(ds.take(3)) [{'label': 0, 'movie_review': 'simplistic , silly and tedious .'}, {'label': 0, 'movie_review': "it's so laddish and juvenile , only teenage boys could possibly find it funny ."}, {'label': 0, 'movie_review': 'exploitative and largely devoid of the depth or sophistication that would make watching such a graphic treatment of the crimes bearable .'}] ``` """
relavent_test_path: /usr/src/app/target_test_cases/failed_tests_IterableDataset.filter.txt
full_function:
def filter( self, function: Optional[Callable] = None, with_indices=False, input_columns: Optional[Union[str, List[str]]] = None, batched: bool = False, batch_size: Optional[int] = 1000, fn_kwargs: Optional[dict] = None, ) -> "IterableDataset": """Apply a filter function to all the elements so that the dataset only includes examples according to the filter function. The filtering is done on-the-fly when iterating over the dataset. Args: function (`Callable`): Callable with one of the following signatures: - `function(example: Dict[str, Any]) -> bool` if `with_indices=False, batched=False` - `function(example: Dict[str, Any], indices: int) -> bool` if `with_indices=True, batched=False` - `function(example: Dict[str, List]) -> List[bool]` if `with_indices=False, batched=True` - `function(example: Dict[str, List], indices: List[int]) -> List[bool]` if `with_indices=True, batched=True` If no function is provided, defaults to an always True function: `lambda x: True`. with_indices (`bool`, defaults to `False`): Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx): ...`. input_columns (`str` or `List[str]`, *optional*): The columns to be passed into `function` as positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument. batched (`bool`, defaults to `False`): Provide batch of examples to `function`. batch_size (`int`, *optional*, default `1000`): Number of examples per batch provided to `function` if `batched=True`. fn_kwargs (`Dict`, *optional*, default `None`): Keyword arguments to be passed to `function`. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True) >>> ds = ds.filter(lambda x: x["label"] == 0) >>> list(ds.take(3)) [{'label': 0, 'movie_review': 'simplistic , silly and tedious .'}, {'label': 0, 'movie_review': "it's so laddish and juvenile , only teenage boys could possibly find it funny ."}, {'label': 0, 'movie_review': 'exploitative and largely devoid of the depth or sophistication that would make watching such a graphic treatment of the crimes bearable .'}] ``` """ if isinstance(input_columns, str): input_columns = [input_columns] # TODO(QL): keep the features (right now if we keep it it would call decode_example again on an already decoded example) info = copy.deepcopy(self._info) info.features = None # We need the examples to be decoded for certain feature types like Image or Audio, so we use TypedExamplesIterable here ex_iterable = FilteredExamplesIterable( TypedExamplesIterable(self._ex_iterable, self._info.features, token_per_repo_id=self._token_per_repo_id) if self._info.features is not None else self._ex_iterable, function=function, with_indices=with_indices, input_columns=input_columns, batched=batched, batch_size=batch_size, fn_kwargs=fn_kwargs, formatting=self._formatting, ) return IterableDataset( ex_iterable=ex_iterable, info=info, split=self._split, formatting=self._formatting, shuffling=copy.deepcopy(self._shuffling), distributed=copy.deepcopy(self._distributed), token_per_repo_id=self._token_per_repo_id, )
function_name: IterableDataset.filter

repository: datasets
repo_id: 20
target_module_path: src/datasets/iterable_dataset.py
prompt:
def map( self, function: Optional[Callable] = None, with_indices: bool = False, input_columns: Optional[Union[str, List[str]]] = None, batched: bool = False, batch_size: Optional[int] = 1000, drop_last_batch: bool = False, remove_columns: Optional[Union[str, List[str]]] = None, features: Optional[Features] = None, fn_kwargs: Optional[dict] = None, ) -> "IterableDataset": """ Apply a function to all the examples in the iterable dataset (individually or in batches) and update them. If your function returns a column that already exists, then it overwrites it. The function is applied on-the-fly on the examples when iterating over the dataset. You can specify whether the function should be batched or not with the `batched` parameter: - If batched is `False`, then the function takes 1 example in and should return 1 example. An example is a dictionary, e.g. `{"text": "Hello there !"}`. - If batched is `True` and `batch_size` is 1, then the function takes a batch of 1 example as input and can return a batch with 1 or more examples. A batch is a dictionary, e.g. a batch of 1 example is {"text": ["Hello there !"]}. - If batched is `True` and `batch_size` is `n` > 1, then the function takes a batch of `n` examples as input and can return a batch with `n` examples, or with an arbitrary number of examples. Note that the last batch may have less than `n` examples. A batch is a dictionary, e.g. a batch of `n` examples is `{"text": ["Hello there !"] * n}`. Args: function (`Callable`, *optional*, defaults to `None`): Function applied on-the-fly on the examples when you iterate on the dataset. It must have one of the following signatures: - `function(example: Dict[str, Any]) -> Dict[str, Any]` if `batched=False` and `with_indices=False` - `function(example: Dict[str, Any], idx: int) -> Dict[str, Any]` if `batched=False` and `with_indices=True` - `function(batch: Dict[str, List]) -> Dict[str, List]` if `batched=True` and `with_indices=False` - `function(batch: Dict[str, List], indices: List[int]) -> Dict[str, List]` if `batched=True` and `with_indices=True` For advanced usage, the function can also return a `pyarrow.Table`. Moreover if your function returns nothing (`None`), then `map` will run your function and return the dataset unchanged. If no function is provided, default to identity function: `lambda x: x`. with_indices (`bool`, defaults to `False`): Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx[, rank]): ...`. input_columns (`Optional[Union[str, List[str]]]`, defaults to `None`): The columns to be passed into `function` as positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument. batched (`bool`, defaults to `False`): Provide batch of examples to `function`. batch_size (`int`, *optional*, defaults to `1000`): Number of examples per batch provided to `function` if `batched=True`. `batch_size <= 0` or `batch_size == None` then provide the full dataset as a single batch to `function`. drop_last_batch (`bool`, defaults to `False`): Whether a last batch smaller than the batch_size should be dropped instead of being processed by the function. remove_columns (`[List[str]]`, *optional*, defaults to `None`): Remove a selection of columns while doing the mapping. Columns will be removed before updating the examples with the output of `function`, i.e. if `function` is adding columns with names in `remove_columns`, these columns will be kept. 
features (`[Features]`, *optional*, defaults to `None`): Feature types of the resulting dataset. fn_kwargs (`Dict`, *optional*, default `None`): Keyword arguments to be passed to `function`. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True) >>> def add_prefix(example): ... example["text"] = "Review: " + example["text"] ... return example >>> ds = ds.map(add_prefix) >>> list(ds.take(3)) [{'label': 1, 'text': 'Review: the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}, {'label': 1, 'text': 'Review: the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'}, {'label': 1, 'text': 'Review: effective but too-tepid biopic'}] ``` """
/usr/src/app/target_test_cases/failed_tests_IterableDataset.map.txt
def map( self, function: Optional[Callable] = None, with_indices: bool = False, input_columns: Optional[Union[str, List[str]]] = None, batched: bool = False, batch_size: Optional[int] = 1000, drop_last_batch: bool = False, remove_columns: Optional[Union[str, List[str]]] = None, features: Optional[Features] = None, fn_kwargs: Optional[dict] = None, ) -> "IterableDataset": """ Apply a function to all the examples in the iterable dataset (individually or in batches) and update them. If your function returns a column that already exists, then it overwrites it. The function is applied on-the-fly on the examples when iterating over the dataset. You can specify whether the function should be batched or not with the `batched` parameter: - If batched is `False`, then the function takes 1 example in and should return 1 example. An example is a dictionary, e.g. `{"text": "Hello there !"}`. - If batched is `True` and `batch_size` is 1, then the function takes a batch of 1 example as input and can return a batch with 1 or more examples. A batch is a dictionary, e.g. a batch of 1 example is {"text": ["Hello there !"]}. - If batched is `True` and `batch_size` is `n` > 1, then the function takes a batch of `n` examples as input and can return a batch with `n` examples, or with an arbitrary number of examples. Note that the last batch may have less than `n` examples. A batch is a dictionary, e.g. a batch of `n` examples is `{"text": ["Hello there !"] * n}`. Args: function (`Callable`, *optional*, defaults to `None`): Function applied on-the-fly on the examples when you iterate on the dataset. It must have one of the following signatures: - `function(example: Dict[str, Any]) -> Dict[str, Any]` if `batched=False` and `with_indices=False` - `function(example: Dict[str, Any], idx: int) -> Dict[str, Any]` if `batched=False` and `with_indices=True` - `function(batch: Dict[str, List]) -> Dict[str, List]` if `batched=True` and `with_indices=False` - `function(batch: Dict[str, List], indices: List[int]) -> Dict[str, List]` if `batched=True` and `with_indices=True` For advanced usage, the function can also return a `pyarrow.Table`. Moreover if your function returns nothing (`None`), then `map` will run your function and return the dataset unchanged. If no function is provided, default to identity function: `lambda x: x`. with_indices (`bool`, defaults to `False`): Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx[, rank]): ...`. input_columns (`Optional[Union[str, List[str]]]`, defaults to `None`): The columns to be passed into `function` as positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument. batched (`bool`, defaults to `False`): Provide batch of examples to `function`. batch_size (`int`, *optional*, defaults to `1000`): Number of examples per batch provided to `function` if `batched=True`. `batch_size <= 0` or `batch_size == None` then provide the full dataset as a single batch to `function`. drop_last_batch (`bool`, defaults to `False`): Whether a last batch smaller than the batch_size should be dropped instead of being processed by the function. remove_columns (`[List[str]]`, *optional*, defaults to `None`): Remove a selection of columns while doing the mapping. Columns will be removed before updating the examples with the output of `function`, i.e. if `function` is adding columns with names in `remove_columns`, these columns will be kept. 
features (`[Features]`, *optional*, defaults to `None`): Feature types of the resulting dataset. fn_kwargs (`Dict`, *optional*, default `None`): Keyword arguments to be passed to `function`. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True) >>> def add_prefix(example): ... example["text"] = "Review: " + example["text"] ... return example >>> ds = ds.map(add_prefix) >>> list(ds.take(3)) [{'label': 1, 'text': 'Review: the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}, {'label': 1, 'text': 'Review: the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'}, {'label': 1, 'text': 'Review: effective but too-tepid biopic'}] ``` """ if isinstance(input_columns, str): input_columns = [input_columns] if isinstance(remove_columns, str): remove_columns = [remove_columns] if function is None: function = identity_func if fn_kwargs is None: fn_kwargs = {} ex_iterable = ( TypedExamplesIterable(self._ex_iterable, self._info.features, token_per_repo_id=self._token_per_repo_id) if self._info.features is not None else self._ex_iterable ) ex_iterable = ( RebatchedArrowExamplesIterable(ex_iterable, batch_size=batch_size, drop_last_batch=drop_last_batch) if self._formatting and self._formatting.format_type == "arrow" else ex_iterable ) ex_iterable = MappedExamplesIterable( ex_iterable, function=function, with_indices=with_indices, input_columns=input_columns, batched=batched, batch_size=batch_size, drop_last_batch=drop_last_batch, remove_columns=remove_columns, fn_kwargs=fn_kwargs, formatting=self._formatting, ) info = self.info.copy() info.features = features return IterableDataset( ex_iterable=ex_iterable, info=info, split=self._split, formatting=self._formatting, shuffling=copy.deepcopy(self._shuffling), distributed=copy.deepcopy(self._distributed), token_per_repo_id=self._token_per_repo_id, )
IterableDataset.map
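For the `batched=True` mode described in the `map` docstring above, a minimal illustrative sketch (not part of the original record; it reuses the rotten_tomatoes columns from the example):

```python
from datasets import load_dataset

ds = load_dataset("rotten_tomatoes", split="train", streaming=True)

def uppercase_batch(batch):
    # `batch` is a dict of lists, e.g. {"text": [...], "label": [...]};
    # returning a dict of lists updates the examples on the fly.
    return {"text": [text.upper() for text in batch["text"]]}

ds = ds.map(uppercase_batch, batched=True, batch_size=32)
print(next(iter(ds))["text"][:40])
```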
datasets
21
src/datasets/iterable_dataset.py
def remove_columns(self, column_names: Union[str, List[str]]) -> "IterableDataset": """ Remove one or several column(s) in the dataset and the features associated to them. The removal is done on-the-fly on the examples when iterating over the dataset. Args: column_names (`Union[str, List[str]]`): Name of the column(s) to remove. Returns: `IterableDataset`: A copy of the dataset object without the columns to remove. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True) >>> next(iter(ds)) {'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .', 'label': 1} >>> ds = ds.remove_columns("label") >>> next(iter(ds)) {'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'} ``` """
/usr/src/app/target_test_cases/failed_tests_IterableDataset.remove_columns.txt
def remove_columns(self, column_names: Union[str, List[str]]) -> "IterableDataset": """ Remove one or several column(s) in the dataset and the features associated to them. The removal is done on-the-fly on the examples when iterating over the dataset. Args: column_names (`Union[str, List[str]]`): Name of the column(s) to remove. Returns: `IterableDataset`: A copy of the dataset object without the columns to remove. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True) >>> next(iter(ds)) {'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .', 'label': 1} >>> ds = ds.remove_columns("label") >>> next(iter(ds)) {'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'} ``` """ original_features = self._info.features.copy() if self._info.features else None ds_iterable = self.map(remove_columns=column_names) if original_features is not None: ds_iterable._info.features = original_features.copy() for col, _ in original_features.items(): if col in column_names: del ds_iterable._info.features[col] return ds_iterable
IterableDataset.remove_columns
datasets
22
src/datasets/iterable_dataset.py
def rename_column(self, original_column_name: str, new_column_name: str) -> "IterableDataset": """ Rename a column in the dataset, and move the features associated to the original column under the new column name. Args: original_column_name (`str`): Name of the column to rename. new_column_name (`str`): New name for the column. Returns: `IterableDataset`: A copy of the dataset with a renamed column. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True) >>> next(iter(ds)) {'label': 1, 'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'} >>> ds = ds.rename_column("text", "movie_review") >>> next(iter(ds)) {'label': 1, 'movie_review': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'} ``` """
/usr/src/app/target_test_cases/failed_tests_IterableDataset.rename_column.txt
def rename_column(self, original_column_name: str, new_column_name: str) -> "IterableDataset": """ Rename a column in the dataset, and move the features associated to the original column under the new column name. Args: original_column_name (`str`): Name of the column to rename. new_column_name (`str`): New name for the column. Returns: `IterableDataset`: A copy of the dataset with a renamed column. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True) >>> next(iter(ds)) {'label': 1, 'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'} >>> ds = ds.rename_column("text", "movie_review") >>> next(iter(ds)) {'label': 1, 'movie_review': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'} ``` """ return self.rename_columns({original_column_name: new_column_name})
IterableDataset.rename_column
datasets
23
src/datasets/iterable_dataset.py
def select_columns(self, column_names: Union[str, List[str]]) -> "IterableDataset": """Select one or several column(s) in the dataset and the features associated to them. The selection is done on-the-fly on the examples when iterating over the dataset. Args: column_names (`Union[str, List[str]]`): Name of the column(s) to select. Returns: `IterableDataset`: A copy of the dataset object with selected columns. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True) >>> next(iter(ds)) {'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .', 'label': 1} >>> ds = ds.select_columns("text") >>> next(iter(ds)) {'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'} ``` """
/usr/src/app/target_test_cases/failed_tests_IterableDataset.select_columns.txt
def select_columns(self, column_names: Union[str, List[str]]) -> "IterableDataset": """Select one or several column(s) in the dataset and the features associated to them. The selection is done on-the-fly on the examples when iterating over the dataset. Args: column_names (`Union[str, List[str]]`): Name of the column(s) to select. Returns: `IterableDataset`: A copy of the dataset object with selected columns. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True) >>> next(iter(ds)) {'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .', 'label': 1} >>> ds = ds.select_columns("text") >>> next(iter(ds)) {'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'} ``` """ if isinstance(column_names, str): column_names = [column_names] if self._info: info = copy.deepcopy(self._info) if self._info.features is not None: missing_columns = set(column_names) - set(self._info.features.keys()) if missing_columns: raise ValueError( f"Column name {list(missing_columns)} not in the " "dataset. Columns in the dataset: " f"{list(self._info.features.keys())}." ) info.features = Features({c: info.features[c] for c in column_names}) ex_iterable = SelectColumnsIterable(self._ex_iterable, column_names) return IterableDataset( ex_iterable=ex_iterable, info=info, split=self._split, formatting=self._formatting, shuffling=self._shuffling, distributed=self._distributed, token_per_repo_id=self._token_per_repo_id, )
IterableDataset.select_columns
datasets
24
src/datasets/iterable_dataset.py
def shuffle( self, seed=None, generator: Optional[np.random.Generator] = None, buffer_size: int = 1000 ) -> "IterableDataset": """ Randomly shuffles the elements of this dataset. This dataset fills a buffer with `buffer_size` elements, then randomly samples elements from this buffer, replacing the selected elements with new elements. For perfect shuffling, a buffer size greater than or equal to the full size of the dataset is required. For instance, if your dataset contains 10,000 elements but `buffer_size` is set to 1000, then `shuffle` will initially select a random element from only the first 1000 elements in the buffer. Once an element is selected, its space in the buffer is replaced by the next (i.e. 1,001-st) element, maintaining the 1000 element buffer. If the dataset is made of several shards, it also does shuffle the order of the shards. However if the order has been fixed by using [`~datasets.IterableDataset.skip`] or [`~datasets.IterableDataset.take`] then the order of the shards is kept unchanged. Args: seed (`int`, *optional*, defaults to `None`): Random seed that will be used to shuffle the dataset. It is used to sample from the shuffle buffer and also to shuffle the data shards. generator (`numpy.random.Generator`, *optional*): Numpy random Generator to use to compute the permutation of the dataset rows. If `generator=None` (default), uses `np.random.default_rng` (the default BitGenerator (PCG64) of NumPy). buffer_size (`int`, defaults to `1000`): Size of the buffer. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True) >>> list(ds.take(3)) [{'label': 1, 'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}, {'label': 1, 'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'}, {'label': 1, 'text': 'effective but too-tepid biopic'}] >>> shuffled_ds = ds.shuffle(seed=42) >>> list(shuffled_ds.take(3)) [{'label': 1, 'text': "a sports movie with action that's exciting on the field and a story you care about off it ."}, {'label': 1, 'text': 'at its best , the good girl is a refreshingly adult take on adultery . . .'}, {'label': 1, 'text': "sam jones became a very lucky filmmaker the day wilco got dropped from their record label , proving that one man's ruin may be another's fortune ."}] ``` """
/usr/src/app/target_test_cases/failed_tests_IterableDataset.shuffle.txt
def shuffle( self, seed=None, generator: Optional[np.random.Generator] = None, buffer_size: int = 1000 ) -> "IterableDataset": """ Randomly shuffles the elements of this dataset. This dataset fills a buffer with `buffer_size` elements, then randomly samples elements from this buffer, replacing the selected elements with new elements. For perfect shuffling, a buffer size greater than or equal to the full size of the dataset is required. For instance, if your dataset contains 10,000 elements but `buffer_size` is set to 1000, then `shuffle` will initially select a random element from only the first 1000 elements in the buffer. Once an element is selected, its space in the buffer is replaced by the next (i.e. 1,001-st) element, maintaining the 1000 element buffer. If the dataset is made of several shards, it also does shuffle the order of the shards. However if the order has been fixed by using [`~datasets.IterableDataset.skip`] or [`~datasets.IterableDataset.take`] then the order of the shards is kept unchanged. Args: seed (`int`, *optional*, defaults to `None`): Random seed that will be used to shuffle the dataset. It is used to sample from the shuffle buffer and also to shuffle the data shards. generator (`numpy.random.Generator`, *optional*): Numpy random Generator to use to compute the permutation of the dataset rows. If `generator=None` (default), uses `np.random.default_rng` (the default BitGenerator (PCG64) of NumPy). buffer_size (`int`, defaults to `1000`): Size of the buffer. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True) >>> list(ds.take(3)) [{'label': 1, 'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}, {'label': 1, 'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'}, {'label': 1, 'text': 'effective but too-tepid biopic'}] >>> shuffled_ds = ds.shuffle(seed=42) >>> list(shuffled_ds.take(3)) [{'label': 1, 'text': "a sports movie with action that's exciting on the field and a story you care about off it ."}, {'label': 1, 'text': 'at its best , the good girl is a refreshingly adult take on adultery . . .'}, {'label': 1, 'text': "sam jones became a very lucky filmmaker the day wilco got dropped from their record label , proving that one man's ruin may be another's fortune ."}] ``` """ if generator is None: generator = np.random.default_rng(seed) else: generator = deepcopy(generator) shuffling = ShufflingConfig(generator=generator, _original_seed=seed) return IterableDataset( ex_iterable=BufferShuffledExamplesIterable( self._ex_iterable, buffer_size=buffer_size, generator=generator ), info=self._info.copy(), split=self._split, formatting=self._formatting, shuffling=shuffling, distributed=copy.deepcopy(self._distributed), token_per_repo_id=self._token_per_repo_id, )
IterableDataset.shuffle
datasets
25
src/datasets/iterable_dataset.py
def skip(self, n: int) -> "IterableDataset": """ Create a new [`IterableDataset`] that skips the first `n` elements. Args: n (`int`): Number of elements to skip. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True) >>> list(ds.take(3)) [{'label': 1, 'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}, {'label': 1, 'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'}, {'label': 1, 'text': 'effective but too-tepid biopic'}] >>> ds = ds.skip(1) >>> list(ds.take(3)) [{'label': 1, 'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'}, {'label': 1, 'text': 'effective but too-tepid biopic'}, {'label': 1, 'text': 'if you sometimes like to go to the movies to have fun , wasabi is a good place to start .'}] ``` """
/usr/src/app/target_test_cases/failed_tests_IterableDataset.skip.txt
def skip(self, n: int) -> "IterableDataset": """ Create a new [`IterableDataset`] that skips the first `n` elements. Args: n (`int`): Number of elements to skip. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True) >>> list(ds.take(3)) [{'label': 1, 'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}, {'label': 1, 'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'}, {'label': 1, 'text': 'effective but too-tepid biopic'}] >>> ds = ds.skip(1) >>> list(ds.take(3)) [{'label': 1, 'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'}, {'label': 1, 'text': 'effective but too-tepid biopic'}, {'label': 1, 'text': 'if you sometimes like to go to the movies to have fun , wasabi is a good place to start .'}] ``` """ ex_iterable = SkipExamplesIterable( self._ex_iterable, n, block_sources_order_when_shuffling=self._shuffling is None, split_when_sharding=self._distributed is None, ) return IterableDataset( ex_iterable=ex_iterable, info=self._info.copy(), split=self._split, formatting=self._formatting, shuffling=copy.deepcopy(self._shuffling), distributed=copy.deepcopy(self._distributed), token_per_repo_id=self._token_per_repo_id, )
IterableDataset.skip
datasets
26
src/datasets/iterable_dataset.py
def take(self, n: int) -> "IterableDataset": """ Create a new [`IterableDataset`] with only the first `n` elements. Args: n (`int`): Number of elements to take. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True) >>> small_ds = ds.take(2) >>> list(small_ds) [{'label': 1, 'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}, {'label': 1, 'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'}] ``` """
/usr/src/app/target_test_cases/failed_tests_IterableDataset.take.txt
def take(self, n: int) -> "IterableDataset": """ Create a new [`IterableDataset`] with only the first `n` elements. Args: n (`int`): Number of elements to take. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True) >>> small_ds = ds.take(2) >>> list(small_ds) [{'label': 1, 'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}, {'label': 1, 'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'}] ``` """ ex_iterable = TakeExamplesIterable( self._ex_iterable, n, block_sources_order_when_shuffling=self._shuffling is None, split_when_sharding=self._distributed is None, ) return IterableDataset( ex_iterable=ex_iterable, info=self._info.copy(), split=self._split, formatting=self._formatting, shuffling=copy.deepcopy(self._shuffling), distributed=copy.deepcopy(self._distributed), token_per_repo_id=self._token_per_repo_id, )
IterableDataset.take
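A common streaming pattern built from `skip` and `take` (illustrative only; the split sizes are arbitrary):

```python
from datasets import load_dataset

ds = load_dataset("rotten_tomatoes", split="train", streaming=True)

# Hold out the first 1000 examples for validation and train on the rest.
validation_ds = ds.take(1000)
train_ds = ds.skip(1000)
```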
datasets
27
src/datasets/download/streaming_download_manager.py
def download(self, url_or_urls):
    """Normalize URL(s) of files to stream data from.

    This is the lazy version of `DownloadManager.download` for streaming.

    Args:
        url_or_urls (`str` or `list` or `dict`):
            URL(s) of files to stream data from. Each url is a `str`.

    Returns:
        url(s): (`str` or `list` or `dict`), URL(s) to stream data from matching the given input url_or_urls.

    Example:

    ```py
    >>> downloaded_files = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz')
    ```
    """
/usr/src/app/target_test_cases/failed_tests_StreamingDownloadManager.download.txt
def download(self, url_or_urls):
    """Normalize URL(s) of files to stream data from.

    This is the lazy version of `DownloadManager.download` for streaming.

    Args:
        url_or_urls (`str` or `list` or `dict`):
            URL(s) of files to stream data from. Each url is a `str`.

    Returns:
        url(s): (`str` or `list` or `dict`), URL(s) to stream data from matching the given input url_or_urls.

    Example:

    ```py
    >>> downloaded_files = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz')
    ```
    """
    url_or_urls = map_nested(self._download_single, url_or_urls, map_tuple=True)
    return url_or_urls
StreamingDownloadManager.download
datasets
28
src/datasets/download/streaming_download_manager.py
def download_and_extract(self, url_or_urls):
    """Prepare given `url_or_urls` for streaming (add extraction protocol).

    This is the lazy version of `DownloadManager.download_and_extract` for streaming.

    It is equivalent to:

    ```
    urls = dl_manager.extract(dl_manager.download(url_or_urls))
    ```

    Args:
        url_or_urls (`str` or `list` or `dict`):
            URL(s) of files to stream data from. Each url is a `str`.

    Returns:
        url(s): (`str` or `list` or `dict`), URL(s) to stream data from matching the given input `url_or_urls`.
    """
/usr/src/app/target_test_cases/failed_tests_StreamingDownloadManager.download_and_extract.txt
def download_and_extract(self, url_or_urls):
    """Prepare given `url_or_urls` for streaming (add extraction protocol).

    This is the lazy version of `DownloadManager.download_and_extract` for streaming.

    It is equivalent to:

    ```
    urls = dl_manager.extract(dl_manager.download(url_or_urls))
    ```

    Args:
        url_or_urls (`str` or `list` or `dict`):
            URL(s) of files to stream data from. Each url is a `str`.

    Returns:
        url(s): (`str` or `list` or `dict`), URL(s) to stream data from matching the given input `url_or_urls`.
    """
    return self.extract(self.download(url_or_urls))
StreamingDownloadManager.download_and_extract
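A hedged sketch of how `download_and_extract` is typically called from a dataset script's `_split_generators`; the builder class, URL and file layout are hypothetical:

```python
import datasets

class MyDataset(datasets.GeneratorBasedBuilder):
    # Hypothetical archive URL, used only to illustrate the call.
    _URL = "https://example.com/data.tar.gz"

    def _info(self):
        return datasets.DatasetInfo(features=datasets.Features({"text": datasets.Value("string")}))

    def _split_generators(self, dl_manager):
        # With a StreamingDownloadManager this only prepends the extraction
        # protocol; nothing is downloaded until the data is iterated over.
        archive_path = dl_manager.download_and_extract(self._URL)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"data_dir": archive_path}),
        ]

    def _generate_examples(self, data_dir):
        # Read the extracted (or streamed) files here.
        yield 0, {"text": "placeholder"}
```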
datasets
29
src/datasets/download/streaming_download_manager.py
def extract(self, url_or_urls):
    """Add extraction protocol for given url(s) for streaming.

    This is the lazy version of `DownloadManager.extract` for streaming.

    Args:
        url_or_urls (`str` or `list` or `dict`):
            URL(s) of files to stream data from. Each url is a `str`.

    Returns:
        url(s): (`str` or `list` or `dict`), URL(s) to stream data from matching the given input `url_or_urls`.

    Example:

    ```py
    >>> downloaded_files = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz')
    >>> extracted_files = dl_manager.extract(downloaded_files)
    ```
    """
/usr/src/app/target_test_cases/failed_tests_StreamingDownloadManager.extract.txt
def extract(self, url_or_urls):
    """Add extraction protocol for given url(s) for streaming.

    This is the lazy version of `DownloadManager.extract` for streaming.

    Args:
        url_or_urls (`str` or `list` or `dict`):
            URL(s) of files to stream data from. Each url is a `str`.

    Returns:
        url(s): (`str` or `list` or `dict`), URL(s) to stream data from matching the given input `url_or_urls`.

    Example:

    ```py
    >>> downloaded_files = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz')
    >>> extracted_files = dl_manager.extract(downloaded_files)
    ```
    """
    urlpaths = map_nested(self._extract, url_or_urls, map_tuple=True)
    return urlpaths
StreamingDownloadManager.extract
datasets
30
src/datasets/download/streaming_download_manager.py
def iter_archive(self, urlpath_or_buf: Union[str, io.BufferedReader]) -> Iterable[Tuple]:
    """Iterate over files within an archive.

    Args:
        urlpath_or_buf (`str` or `io.BufferedReader`):
            Archive path or archive binary file object.

    Yields:
        `tuple[str, io.BufferedReader]`: 2-tuple (path_within_archive, file_object).
            File object is opened in binary mode.

    Example:

    ```py
    >>> archive = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz')
    >>> files = dl_manager.iter_archive(archive)
    ```
    """
/usr/src/app/target_test_cases/failed_tests_StreamingDownloadManager.iter_archive.txt
def iter_archive(self, urlpath_or_buf: Union[str, io.BufferedReader]) -> Iterable[Tuple]:
    """Iterate over files within an archive.

    Args:
        urlpath_or_buf (`str` or `io.BufferedReader`):
            Archive path or archive binary file object.

    Yields:
        `tuple[str, io.BufferedReader]`: 2-tuple (path_within_archive, file_object).
            File object is opened in binary mode.

    Example:

    ```py
    >>> archive = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz')
    >>> files = dl_manager.iter_archive(archive)
    ```
    """
    if hasattr(urlpath_or_buf, "read"):
        return ArchiveIterable.from_buf(urlpath_or_buf)
    else:
        return ArchiveIterable.from_urlpath(urlpath_or_buf, download_config=self.download_config)
StreamingDownloadManager.iter_archive
datasets
31
src/datasets/download/streaming_download_manager.py
def iter_files(self, urlpaths: Union[str, List[str]]) -> Iterable[str]:
    """Iterate over files.

    Args:
        urlpaths (`str` or `list` of `str`):
            Root paths.

    Yields:
        str: File URL path.

    Example:

    ```py
    >>> files = dl_manager.download_and_extract('https://huggingface.co/datasets/beans/resolve/main/data/train.zip')
    >>> files = dl_manager.iter_files(files)
    ```
    """
/usr/src/app/target_test_cases/failed_tests_StreamingDownloadManager.iter_files.txt
def iter_files(self, urlpaths: Union[str, List[str]]) -> Iterable[str]:
    """Iterate over files.

    Args:
        urlpaths (`str` or `list` of `str`):
            Root paths.

    Yields:
        str: File URL path.

    Example:

    ```py
    >>> files = dl_manager.download_and_extract('https://huggingface.co/datasets/beans/resolve/main/data/train.zip')
    >>> files = dl_manager.iter_files(files)
    ```
    """
    return FilesIterable.from_urlpaths(urlpaths, download_config=self.download_config)
StreamingDownloadManager.iter_files
datasets
32
src/datasets/table.py
def equals(self, *args, **kwargs):
    """
    Check if contents of two tables are equal.

    Args:
        other ([`~datasets.table.Table`]):
            Table to compare against.
        check_metadata (`bool`, defaults to `False`):
            Whether schema metadata equality should be checked as well.

    Returns:
        `bool`
    """
/usr/src/app/target_test_cases/failed_tests_Table.equals.txt
def equals(self, *args, **kwargs):
    """
    Check if contents of two tables are equal.

    Args:
        other ([`~datasets.table.Table`]):
            Table to compare against.
        check_metadata (`bool`, defaults to `False`):
            Whether schema metadata equality should be checked as well.

    Returns:
        `bool`
    """
    args = tuple(arg.table if isinstance(arg, Table) else arg for arg in args)
    kwargs = {k: v.table if isinstance(v, Table) else v for k, v in kwargs.items()}
    return self.table.equals(*args, **kwargs)
Table.equals
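A small sketch of `Table.equals`, assuming `InMemoryTable` wrapping plain pyarrow tables:

```python
import pyarrow as pa
from datasets.table import InMemoryTable

t1 = InMemoryTable(pa.table({"a": [1, 2, 3]}))
t2 = InMemoryTable(pa.table({"a": [1, 2, 3]}))

# Arguments that are datasets Table objects are unwrapped before delegating
# to pyarrow, so both wrapped and raw pyarrow tables can be compared.
assert t1.equals(t2)
assert t1.equals(pa.table({"a": [1, 2, 3]}))
```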
datasets
33
src/datasets/table.py
def validate(self, *args, **kwargs):
    """
    Perform validation checks. An exception is raised if validation fails.

    By default only cheap validation checks are run. Pass `full=True`
    for thorough validation checks (potentially `O(n)`).

    Args:
        full (`bool`, defaults to `False`):
            If `True`, run expensive checks, otherwise cheap checks only.

    Raises:
        `pa.lib.ArrowInvalid`: if validation fails
    """
/usr/src/app/target_test_cases/failed_tests_Table.validate.txt
def validate(self, *args, **kwargs):
    """
    Perform validation checks. An exception is raised if validation fails.

    By default only cheap validation checks are run. Pass `full=True`
    for thorough validation checks (potentially `O(n)`).

    Args:
        full (`bool`, defaults to `False`):
            If `True`, run expensive checks, otherwise cheap checks only.

    Raises:
        `pa.lib.ArrowInvalid`: if validation fails
    """
    return self.table.validate(*args, **kwargs)
Table.validate
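A minimal sketch of `validate`, including the thorough `full=True` mode:

```python
import pyarrow as pa
from datasets.table import InMemoryTable

table = InMemoryTable(pa.table({"ids": [1, 2, 3], "text": ["a", "b", "c"]}))

table.validate()           # cheap structural checks
table.validate(full=True)  # O(n) data validation; raises pa.lib.ArrowInvalid on failure
```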
datasets
34
src/datasets/utils/sharding.py
def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
    """
    Get the range of shard indices per job.

    If num_shards<max_num_jobs, then num_shards jobs are given a range of one shard.

    The shards indices order is preserved: e.g. all the first shards are given the first job.
    Moreover all the jobs are given approximately the same number of shards.

    Example:

    ```python
    >>> _distribute_shards(2, max_num_jobs=4)
    [range(0, 1), range(1, 2)]
    >>> _distribute_shards(10, max_num_jobs=3)
    [range(0, 4), range(4, 7), range(7, 10)]
    ```
    """
/usr/src/app/target_test_cases/failed_tests__distribute_shards.txt
def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
    """
    Get the range of shard indices per job.

    If num_shards<max_num_jobs, then num_shards jobs are given a range of one shard.

    The shards indices order is preserved: e.g. all the first shards are given the first job.
    Moreover all the jobs are given approximately the same number of shards.

    Example:

    ```python
    >>> _distribute_shards(2, max_num_jobs=4)
    [range(0, 1), range(1, 2)]
    >>> _distribute_shards(10, max_num_jobs=3)
    [range(0, 4), range(4, 7), range(7, 10)]
    ```
    """
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start, start + num_shards_to_add)
        shards_indices_per_group.append(shard_indices)
    return shards_indices_per_group
_distribute_shards
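An illustrative use of this internal helper for assigning shard files to jobs (the file names are placeholders):

```python
from datasets.utils.sharding import _distribute_shards

shard_files = [f"data-{i:05d}.parquet" for i in range(10)]

ranges = _distribute_shards(num_shards=len(shard_files), max_num_jobs=3)
jobs = [[shard_files[i] for i in r] for r in ranges]

# Contiguous and nearly balanced: 4, 3 and 3 shards per job.
print([len(job) for job in jobs])  # [4, 3, 3]
```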
datasets
35
src/datasets/table.py
def _interpolation_search(arr: List[int], x: int) -> int:
    """
    Return the position i of a sorted array so that arr[i] <= x < arr[i+1]

    Args:
        arr (`List[int]`): non-empty sorted list of integers
        x (`int`): query

    Returns:
        `int`: the position i so that arr[i] <= x < arr[i+1]

    Raises:
        `IndexError`: if the array is empty or if the query is outside the array values
    """
/usr/src/app/target_test_cases/failed_tests__interpolation_search.txt
def _interpolation_search(arr: List[int], x: int) -> int:
    """
    Return the position i of a sorted array so that arr[i] <= x < arr[i+1]

    Args:
        arr (`List[int]`): non-empty sorted list of integers
        x (`int`): query

    Returns:
        `int`: the position i so that arr[i] <= x < arr[i+1]

    Raises:
        `IndexError`: if the array is empty or if the query is outside the array values
    """
    i, j = 0, len(arr) - 1
    while i < j and arr[i] <= x < arr[j]:
        k = i + ((j - i) * (x - arr[i]) // (arr[j] - arr[i]))
        if arr[k] <= x < arr[k + 1]:
            return k
        elif arr[k] < x:
            i, j = k + 1, j
        else:
            i, j = i, k
    raise IndexError(f"Invalid query '{x}' for size {arr[-1] if len(arr) else 'none'}.")
_interpolation_search
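A sketch of a typical use of this internal helper: locating which block of a concatenated table holds a given row, using cumulative row offsets (the offsets below are made up):

```python
from datasets.table import _interpolation_search

# offsets[i] is the first global row index of block i; the last entry is the total length.
offsets = [0, 5, 10, 20, 100]

row = 12
block_index = _interpolation_search(offsets, row)
print(block_index)                  # 2, since offsets[2] <= 12 < offsets[3]
print(row - offsets[block_index])   # index of the row inside that block
```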
datasets
36
src/datasets/data_files.py
def _is_inside_unrequested_special_dir(matched_rel_path: str, pattern: str) -> bool:
    """
    When a path matches a pattern, we additionally check if it's inside a special directory
    we ignore by default (if it starts with a double underscore).

    Users can still explicitly request a filepath inside such a directory if "__pycache__" is
    mentioned explicitly in the requested pattern.

    Some examples:

    base directory:

        ./
        └── __pycache__
            └── b.txt

    >>> _is_inside_unrequested_special_dir("__pycache__/b.txt", "**")
    True
    >>> _is_inside_unrequested_special_dir("__pycache__/b.txt", "*/b.txt")
    True
    >>> _is_inside_unrequested_special_dir("__pycache__/b.txt", "__pycache__/*")
    False
    >>> _is_inside_unrequested_special_dir("__pycache__/b.txt", "__*/*")
    False
    """
/usr/src/app/target_test_cases/failed_tests__is_inside_unrequested_special_dir.txt
def _is_inside_unrequested_special_dir(matched_rel_path: str, pattern: str) -> bool:
    """
    When a path matches a pattern, we additionally check if it's inside a special directory
    we ignore by default (if it starts with a double underscore).

    Users can still explicitly request a filepath inside such a directory if "__pycache__" is
    mentioned explicitly in the requested pattern.

    Some examples:

    base directory:

        ./
        └── __pycache__
            └── b.txt

    >>> _is_inside_unrequested_special_dir("__pycache__/b.txt", "**")
    True
    >>> _is_inside_unrequested_special_dir("__pycache__/b.txt", "*/b.txt")
    True
    >>> _is_inside_unrequested_special_dir("__pycache__/b.txt", "__pycache__/*")
    False
    >>> _is_inside_unrequested_special_dir("__pycache__/b.txt", "__*/*")
    False
    """
    # We just need to check if every special directory from the path is present explicitly in the pattern.
    # Since we assume that the path matches the pattern, it's equivalent to counting that both
    # the parent path and the parent pattern have the same number of special directories.
    data_dirs_to_ignore_in_path = [part for part in PurePath(matched_rel_path).parent.parts if part.startswith("__")]
    data_dirs_to_ignore_in_pattern = [part for part in PurePath(pattern).parent.parts if part.startswith("__")]
    return len(data_dirs_to_ignore_in_path) != len(data_dirs_to_ignore_in_pattern)
_is_inside_unrequested_special_dir
datasets
37
src/datasets/utils/file_utils.py
def cached_path( url_or_filename, download_config=None, **download_kwargs, ) -> str: """ Given something that might be a URL (or might be a local path), determine which. If it's a URL, download the file and cache it, and return the path to the cached file. If it's already a local path, make sure the file exists and then return the path. Return: Local path (string) Raises: FileNotFoundError: in case of non-recoverable file (non-existent or no cache on disk) ConnectionError: in case of unreachable url and no cache on disk ValueError: if it couldn't parse the url or filename correctly requests.exceptions.ConnectionError: in case of internet connection issue """
/usr/src/app/target_test_cases/failed_tests_cached_path.txt
def cached_path( url_or_filename, download_config=None, **download_kwargs, ) -> str: """ Given something that might be a URL (or might be a local path), determine which. If it's a URL, download the file and cache it, and return the path to the cached file. If it's already a local path, make sure the file exists and then return the path. Return: Local path (string) Raises: FileNotFoundError: in case of non-recoverable file (non-existent or no cache on disk) ConnectionError: in case of unreachable url and no cache on disk ValueError: if it couldn't parse the url or filename correctly requests.exceptions.ConnectionError: in case of internet connection issue """ if download_config is None: download_config = DownloadConfig(**download_kwargs) cache_dir = download_config.cache_dir or config.DOWNLOADED_DATASETS_PATH if isinstance(cache_dir, Path): cache_dir = str(cache_dir) if isinstance(url_or_filename, Path): url_or_filename = str(url_or_filename) # Convert fsspec URL in the format "file://local/path" to "local/path" if can_be_local(url_or_filename): url_or_filename = strip_protocol(url_or_filename) if is_remote_url(url_or_filename): # URL, so get it from the cache (downloading if necessary) url_or_filename, storage_options = _prepare_path_and_storage_options( url_or_filename, download_config=download_config ) # Download files from Hugging Face. # Note: no need to check for https://huggingface.co file URLs since _prepare_path_and_storage_options # prepares Hugging Face HTTP URLs as hf:// paths already if url_or_filename.startswith("hf://"): resolved_path = huggingface_hub.HfFileSystem( endpoint=config.HF_ENDPOINT, token=download_config.token ).resolve_path(url_or_filename) try: output_path = huggingface_hub.HfApi( endpoint=config.HF_ENDPOINT, token=download_config.token, library_name="datasets", library_version=__version__, user_agent=get_datasets_user_agent(download_config.user_agent), ).hf_hub_download( repo_id=resolved_path.repo_id, repo_type=resolved_path.repo_type, revision=resolved_path.revision, filename=resolved_path.path_in_repo, force_download=download_config.force_download, proxies=download_config.proxies, ) except ( huggingface_hub.utils.RepositoryNotFoundError, huggingface_hub.utils.EntryNotFoundError, huggingface_hub.utils.RevisionNotFoundError, huggingface_hub.utils.GatedRepoError, ) as e: raise FileNotFoundError(str(e)) from e # Download external files else: output_path = get_from_cache( url_or_filename, cache_dir=cache_dir, force_download=download_config.force_download, user_agent=download_config.user_agent, use_etag=download_config.use_etag, token=download_config.token, storage_options=storage_options, download_desc=download_config.download_desc, disable_tqdm=download_config.disable_tqdm, ) elif os.path.exists(url_or_filename): # File, and it exists. output_path = url_or_filename elif is_local_path(url_or_filename): # File, but it doesn't exist. raise FileNotFoundError(f"Local file {url_or_filename} doesn't exist") else: # Something unknown raise ValueError(f"unable to parse {url_or_filename} as a URL or as a local path") if output_path is None: return output_path if download_config.extract_compressed_file: if download_config.extract_on_the_fly: # Add a compression prefix to the compressed file so that it can be extracted # as it's being read using xopen. 
protocol = _get_extraction_protocol(output_path, download_config=download_config) extension = _get_path_extension(url_or_filename.split("::")[0]) if ( protocol and extension not in ["tgz", "tar"] and not url_or_filename.split("::")[0].endswith((".tar.gz", ".tar.bz2", ".tar.xz")) ): output_path = relative_to_absolute_path(output_path) if protocol in SINGLE_FILE_COMPRESSION_PROTOCOLS: # there is one single file which is the uncompressed file inner_file = os.path.basename(output_path) inner_file = inner_file[: inner_file.rindex(".")] if "." in inner_file else inner_file output_path = f"{protocol}://{inner_file}::{output_path}" else: output_path = f"{protocol}://::{output_path}" return output_path # Eager extraction output_path = ExtractManager(cache_dir=download_config.cache_dir).extract( output_path, force_extract=download_config.force_extract ) return relative_to_absolute_path(output_path)
cached_path
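A hedged usage sketch of `cached_path`; the URL is a placeholder and network access is required on the first call:

```python
from datasets import DownloadConfig
from datasets.utils.file_utils import cached_path

# Downloads once and returns the local cache path on subsequent calls.
local_path = cached_path("https://example.com/data/train.csv.gz")

# With extraction enabled, the returned path points at the extracted content.
extracted_path = cached_path(
    "https://example.com/data/train.csv.gz",
    download_config=DownloadConfig(extract_compressed_file=True),
)
```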
datasets
38
src/datasets/features/features.py
def cast_to_python_objects(obj: Any, only_1d_for_numpy=False, optimize_list_casting=True) -> Any: """ Cast numpy/pytorch/tensorflow/pandas objects to python lists. It works recursively. If `optimize_list_casting` is True, To avoid iterating over possibly long lists, it first checks (recursively) if the first element that is not None or empty (if it is a sequence) has to be casted. If the first element needs to be casted, then all the elements of the list will be casted, otherwise they'll stay the same. This trick allows to cast objects that contain tokenizers outputs without iterating over every single token for example. Args: obj: the object (nested struct) to cast only_1d_for_numpy (bool, default ``False``): whether to keep the full multi-dim tensors as multi-dim numpy arrays, or convert them to nested lists of 1-dimensional numpy arrays. This can be useful to keep only 1-d arrays to instantiate Arrow arrays. Indeed Arrow only support converting 1-dimensional array values. optimize_list_casting (bool, default ``True``): whether to optimize list casting by checking the first non-null element to see if it needs to be casted and if it doesn't, not checking the rest of the list elements. Returns: casted_obj: the casted object """
/usr/src/app/target_test_cases/failed_tests_cast_to_python_objects.txt
def cast_to_python_objects(obj: Any, only_1d_for_numpy=False, optimize_list_casting=True) -> Any: """ Cast numpy/pytorch/tensorflow/pandas objects to python lists. It works recursively. If `optimize_list_casting` is True, To avoid iterating over possibly long lists, it first checks (recursively) if the first element that is not None or empty (if it is a sequence) has to be casted. If the first element needs to be casted, then all the elements of the list will be casted, otherwise they'll stay the same. This trick allows to cast objects that contain tokenizers outputs without iterating over every single token for example. Args: obj: the object (nested struct) to cast only_1d_for_numpy (bool, default ``False``): whether to keep the full multi-dim tensors as multi-dim numpy arrays, or convert them to nested lists of 1-dimensional numpy arrays. This can be useful to keep only 1-d arrays to instantiate Arrow arrays. Indeed Arrow only support converting 1-dimensional array values. optimize_list_casting (bool, default ``True``): whether to optimize list casting by checking the first non-null element to see if it needs to be casted and if it doesn't, not checking the rest of the list elements. Returns: casted_obj: the casted object """ return _cast_to_python_objects( obj, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting )[0]
cast_to_python_objects
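A small sketch of the `only_1d_for_numpy` behaviour described in the docstring (the expected outputs are inferred from that description, not verified here):

```python
import numpy as np
from datasets.features.features import cast_to_python_objects

example = {
    "embedding": np.ones((2, 3), dtype=np.float32),  # 2-d numpy array
    "tokens": ["hello", "world"],                    # plain python objects pass through
}

# Multi-dim arrays are split into nested 1-d arrays, the layout Arrow can ingest.
casted = cast_to_python_objects(example, only_1d_for_numpy=True)
print(type(casted["embedding"]))     # list of 1-d numpy arrays
print(casted["embedding"][0].shape)  # (3,)
```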
datasets
39
src/datasets/table.py
def concat_tables(tables: List[Table], axis: int = 0) -> Table:
    """
    Concatenate tables.

    Args:
        tables (list of `Table`):
            List of tables to be concatenated.
        axis (`{0, 1}`, defaults to `0`, meaning over rows):
            Axis to concatenate over, where `0` means over rows (vertically) and `1`
            means over columns (horizontally).

            <Added version="1.6.0"/>

    Returns:
        `datasets.table.Table`:
            If the number of input tables is > 1, then the returned table is a `datasets.table.ConcatenationTable`.
            Otherwise if there's only one table, it is returned as is.
    """
/usr/src/app/target_test_cases/failed_tests_concat_tables.txt
def concat_tables(tables: List[Table], axis: int = 0) -> Table:
    """
    Concatenate tables.

    Args:
        tables (list of `Table`):
            List of tables to be concatenated.
        axis (`{0, 1}`, defaults to `0`, meaning over rows):
            Axis to concatenate over, where `0` means over rows (vertically) and `1`
            means over columns (horizontally).

            <Added version="1.6.0"/>

    Returns:
        `datasets.table.Table`:
            If the number of input tables is > 1, then the returned table is a `datasets.table.ConcatenationTable`.
            Otherwise if there's only one table, it is returned as is.
    """
    tables = list(tables)
    if len(tables) == 1:
        return tables[0]
    return ConcatenationTable.from_tables(tables, axis=axis)
concat_tables
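A short sketch of both concatenation axes, using in-memory pyarrow tables:

```python
import pyarrow as pa
from datasets.table import InMemoryTable, concat_tables

t1 = InMemoryTable(pa.table({"text": ["a", "b"]}))
t2 = InMemoryTable(pa.table({"text": ["c", "d"]}))
t3 = InMemoryTable(pa.table({"label": [0, 1]}))

rows = concat_tables([t1, t2])          # axis=0: 4 rows, one column
cols = concat_tables([t1, t3], axis=1)  # axis=1: 2 rows, two columns (same length required)
print(rows.num_rows, cols.column_names)
```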
datasets
40
src/datasets/hub.py
def convert_to_parquet( repo_id: str, revision: Optional[str] = None, token: Optional[Union[bool, str]] = None, trust_remote_code: Optional[bool] = None, ) -> CommitInfo: """Convert Hub [script-based dataset](dataset_script) to Parquet [data-only dataset](repository_structure), so that the dataset viewer will be supported. This function: - makes a copy of the script on the "main" branch into a dedicated branch called "script" (if it does not already exist) - creates a pull request to the Hub dataset to convert it to Parquet files (and deletes the script from the main branch) If in the future you need to recreate the Parquet files from the "script" branch, pass the `revision="script"` argument. Note that you should pass the `trust_remote_code=True` argument only if you trust the remote code to be executed locally on your machine. Args: repo_id (`str`): ID of the source Hub dataset repository, in the following format: `<user>/<dataset_name>` or `<org>/<dataset_name>`. revision (`str`, *optional*): Branch of the source Hub dataset repository. Defaults to the `"main"` branch. token (`bool` or `str`, *optional*): Authentication token for the Hugging Face Hub. trust_remote_code (`bool`, defaults to `False`): Whether you trust the remote code of the Hub script-based dataset to be executed locally on your machine. This option should only be set to `True` for repositories where you have read the code and which you trust. <Changed version="2.20.0"> `trust_remote_code` defaults to `False` if not specified. </Changed> Returns: `huggingface_hub.CommitInfo` """
/usr/src/app/target_test_cases/failed_tests_convert_to_parquet.txt
def convert_to_parquet( repo_id: str, revision: Optional[str] = None, token: Optional[Union[bool, str]] = None, trust_remote_code: Optional[bool] = None, ) -> CommitInfo: """Convert Hub [script-based dataset](dataset_script) to Parquet [data-only dataset](repository_structure), so that the dataset viewer will be supported. This function: - makes a copy of the script on the "main" branch into a dedicated branch called "script" (if it does not already exist) - creates a pull request to the Hub dataset to convert it to Parquet files (and deletes the script from the main branch) If in the future you need to recreate the Parquet files from the "script" branch, pass the `revision="script"` argument. Note that you should pass the `trust_remote_code=True` argument only if you trust the remote code to be executed locally on your machine. Args: repo_id (`str`): ID of the source Hub dataset repository, in the following format: `<user>/<dataset_name>` or `<org>/<dataset_name>`. revision (`str`, *optional*): Branch of the source Hub dataset repository. Defaults to the `"main"` branch. token (`bool` or `str`, *optional*): Authentication token for the Hugging Face Hub. trust_remote_code (`bool`, defaults to `False`): Whether you trust the remote code of the Hub script-based dataset to be executed locally on your machine. This option should only be set to `True` for repositories where you have read the code and which you trust. <Changed version="2.20.0"> `trust_remote_code` defaults to `False` if not specified. </Changed> Returns: `huggingface_hub.CommitInfo` """ print(f"{repo_id}") configs = get_dataset_config_names(repo_id, token=token, revision=revision, trust_remote_code=trust_remote_code) print(f"{configs = }") default_config = get_dataset_default_config_name( repo_id, token=token, revision=revision, trust_remote_code=trust_remote_code ) print(f"{default_config = }") if default_config: config = default_config configs.remove(default_config) else: config = configs.pop(0) print(f"{config = }") dataset = load_dataset(repo_id, config, revision=revision, trust_remote_code=trust_remote_code) commit_info = dataset.push_to_hub( repo_id, config_name=config, commit_message="Convert dataset to Parquet", commit_description="Convert dataset to Parquet.", create_pr=True, token=token, set_default=default_config is not None, ) time.sleep(5) pr_revision, pr_url = commit_info.pr_revision, commit_info.pr_url for config in configs: print(f"{config = }") dataset = load_dataset(repo_id, config, revision=revision, trust_remote_code=trust_remote_code) dataset.push_to_hub( repo_id, config_name=config, commit_message=f"Add '{config}' config data files", revision=pr_revision, token=token, ) time.sleep(5) _delete_files(repo_id, revision=pr_revision, token=token) if not revision: api = HfApi(endpoint=datasets.config.HF_ENDPOINT, token=token) try: api.create_branch(repo_id, branch="script", repo_type="dataset", token=token, exist_ok=True) except HfHubHTTPError: pass print(f"You can find your PR to convert the dataset to Parquet at: {pr_url}") return commit_info
convert_to_parquet
datasets
41
src/datasets/hub.py
def delete_from_hub( repo_id: str, config_name: str, revision: Optional[str] = None, token: Optional[Union[bool, str]] = None, ) -> CommitInfo: """Delete a dataset configuration from a [data-only dataset](repository_structure) on the Hub. Args: repo_id (`str`): ID of the Hub dataset repository, in the following format: `<user>/<dataset_name>` or `<org>/<dataset_name>`. config_name (`str`): Name of the dataset configuration. revision (`str`, *optional*): Branch to delete the configuration from. Defaults to the `"main"` branch. token (`bool` or `str`, *optional*): Authentication token for the Hugging Face Hub. Returns: `huggingface_hub.CommitInfo` """
/usr/src/app/target_test_cases/failed_tests_delete_from_hub.txt
def delete_from_hub( repo_id: str, config_name: str, revision: Optional[str] = None, token: Optional[Union[bool, str]] = None, ) -> CommitInfo: """Delete a dataset configuration from a [data-only dataset](repository_structure) on the Hub. Args: repo_id (`str`): ID of the Hub dataset repository, in the following format: `<user>/<dataset_name>` or `<org>/<dataset_name>`. config_name (`str`): Name of the dataset configuration. revision (`str`, *optional*): Branch to delete the configuration from. Defaults to the `"main"` branch. token (`bool` or `str`, *optional*): Authentication token for the Hugging Face Hub. Returns: `huggingface_hub.CommitInfo` """ operations = [] # data_files fs = HfFileSystem(endpoint=datasets.config.HF_ENDPOINT, token=token) builder = load_dataset_builder(repo_id, config_name, revision=revision, token=token, trust_remote_code=False) for data_file in chain(*builder.config.data_files.values()): data_file_resolved_path = fs.resolve_path(data_file) if data_file_resolved_path.repo_id == repo_id: operations.append(CommitOperationDelete(path_in_repo=data_file_resolved_path.path_in_repo)) # README.md dataset_card = DatasetCard.load(repo_id) # config_names if dataset_card.data.get("config_names", None) and config_name in dataset_card.data["config_names"]: dataset_card.data["config_names"].remove(config_name) # metadata_configs metadata_configs = MetadataConfigs.from_dataset_card_data(dataset_card.data) if metadata_configs: _ = metadata_configs.pop(config_name, None) dataset_card_data = DatasetCardData() metadata_configs.to_dataset_card_data(dataset_card_data) if datasets.config.METADATA_CONFIGS_FIELD in dataset_card_data: dataset_card.data[datasets.config.METADATA_CONFIGS_FIELD] = dataset_card_data[ datasets.config.METADATA_CONFIGS_FIELD ] else: _ = dataset_card.data.pop(datasets.config.METADATA_CONFIGS_FIELD, None) # dataset_info dataset_infos: DatasetInfosDict = DatasetInfosDict.from_dataset_card_data(dataset_card.data) if dataset_infos: _ = dataset_infos.pop(config_name, None) dataset_card_data = DatasetCardData() dataset_infos.to_dataset_card_data(dataset_card_data) if "dataset_info" in dataset_card_data: dataset_card.data["dataset_info"] = dataset_card_data["dataset_info"] else: _ = dataset_card.data.pop("dataset_info", None) # Commit operations.append( CommitOperationAdd(path_in_repo=datasets.config.REPOCARD_FILENAME, path_or_fileobj=str(dataset_card).encode()) ) api = HfApi(endpoint=datasets.config.HF_ENDPOINT, token=token) commit_info = api.create_commit( repo_id, operations=operations, commit_message=f"Delete '{config_name}' config", commit_description=f"Delete '{config_name}' config.", token=token, repo_type="dataset", revision=revision, create_pr=True, ) print(f"You can find your PR to delete the dataset config at: {commit_info.pr_url}") return commit_info
delete_from_hub
datasets
42
src/datasets/inspect.py
def get_dataset_config_info( path: str, config_name: Optional[str] = None, data_files: Optional[Union[str, Sequence[str], Mapping[str, Union[str, Sequence[str]]]]] = None, download_config: Optional[DownloadConfig] = None, download_mode: Optional[Union[DownloadMode, str]] = None, revision: Optional[Union[str, Version]] = None, token: Optional[Union[bool, str]] = None, **config_kwargs, ) -> DatasetInfo: """Get the meta information (DatasetInfo) about a dataset for a particular config Args: path (``str``): path to the dataset processing script with the dataset builder. Can be either: - a local path to processing script or the directory containing the script (if the script has the same name as the directory), e.g. ``'./dataset/squad'`` or ``'./dataset/squad/squad.py'`` - a dataset identifier on the Hugging Face Hub (list all available datasets and ids with [`huggingface_hub.list_datasets`]), e.g. ``'squad'``, ``'glue'`` or ``'openai/webtext'`` config_name (:obj:`str`, optional): Defining the name of the dataset configuration. data_files (:obj:`str` or :obj:`Sequence` or :obj:`Mapping`, optional): Path(s) to source data file(s). download_config (:class:`~download.DownloadConfig`, optional): Specific download configuration parameters. download_mode (:class:`DownloadMode` or :obj:`str`, default ``REUSE_DATASET_IF_EXISTS``): Download/generate mode. revision (:class:`~utils.Version` or :obj:`str`, optional): Version of the dataset script to load. As datasets have their own git repository on the Datasets Hub, the default version "main" corresponds to their "main" branch. You can specify a different version than the default "main" by using a commit SHA or a git tag of the dataset repository. token (``str`` or :obj:`bool`, optional): Optional string or boolean to use as Bearer token for remote files on the Datasets Hub. If True, or not specified, will get token from `"~/.huggingface"`. **config_kwargs (additional keyword arguments): optional attributes for builder class which will override the attributes if supplied. """
/usr/src/app/target_test_cases/failed_tests_get_dataset_config_info.txt
def get_dataset_config_info( path: str, config_name: Optional[str] = None, data_files: Optional[Union[str, Sequence[str], Mapping[str, Union[str, Sequence[str]]]]] = None, download_config: Optional[DownloadConfig] = None, download_mode: Optional[Union[DownloadMode, str]] = None, revision: Optional[Union[str, Version]] = None, token: Optional[Union[bool, str]] = None, **config_kwargs, ) -> DatasetInfo: """Get the meta information (DatasetInfo) about a dataset for a particular config Args: path (``str``): path to the dataset processing script with the dataset builder. Can be either: - a local path to processing script or the directory containing the script (if the script has the same name as the directory), e.g. ``'./dataset/squad'`` or ``'./dataset/squad/squad.py'`` - a dataset identifier on the Hugging Face Hub (list all available datasets and ids with [`huggingface_hub.list_datasets`]), e.g. ``'squad'``, ``'glue'`` or ``'openai/webtext'`` config_name (:obj:`str`, optional): Defining the name of the dataset configuration. data_files (:obj:`str` or :obj:`Sequence` or :obj:`Mapping`, optional): Path(s) to source data file(s). download_config (:class:`~download.DownloadConfig`, optional): Specific download configuration parameters. download_mode (:class:`DownloadMode` or :obj:`str`, default ``REUSE_DATASET_IF_EXISTS``): Download/generate mode. revision (:class:`~utils.Version` or :obj:`str`, optional): Version of the dataset script to load. As datasets have their own git repository on the Datasets Hub, the default version "main" corresponds to their "main" branch. You can specify a different version than the default "main" by using a commit SHA or a git tag of the dataset repository. token (``str`` or :obj:`bool`, optional): Optional string or boolean to use as Bearer token for remote files on the Datasets Hub. If True, or not specified, will get token from `"~/.huggingface"`. **config_kwargs (additional keyword arguments): optional attributes for builder class which will override the attributes if supplied. """ builder = load_dataset_builder( path, name=config_name, data_files=data_files, download_config=download_config, download_mode=download_mode, revision=revision, token=token, **config_kwargs, ) info = builder.info if info.splits is None: download_config = download_config.copy() if download_config else DownloadConfig() if token is not None: download_config.token = token builder._check_manual_download( StreamingDownloadManager(base_path=builder.base_path, download_config=download_config) ) try: info.splits = { split_generator.name: {"name": split_generator.name, "dataset_name": path} for split_generator in builder._split_generators( StreamingDownloadManager(base_path=builder.base_path, download_config=download_config) ) } except Exception as err: raise SplitsNotFoundError("The split names could not be parsed from the dataset config.") from err return info
get_dataset_config_info
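A short sketch of how the function above is typically called; the `glue`/`cola` identifiers are just a well-known example and are not taken from this record.

```py
from datasets import get_dataset_config_info

# Returns a DatasetInfo without downloading or generating the dataset;
# split names are resolved through the streaming download manager if needed.
info = get_dataset_config_info("glue", config_name="cola")
print(info.features)
print(list(info.splits))  # e.g. ['train', 'validation', 'test']
```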
datasets
43
src/datasets/inspect.py
def get_dataset_config_names( path: str, revision: Optional[Union[str, Version]] = None, download_config: Optional[DownloadConfig] = None, download_mode: Optional[Union[DownloadMode, str]] = None, dynamic_modules_path: Optional[str] = None, data_files: Optional[Union[Dict, List, str]] = None, **download_kwargs, ): """Get the list of available config names for a particular dataset. Args: path (`str`): path to the dataset processing script with the dataset builder. Can be either: - a local path to processing script or the directory containing the script (if the script has the same name as the directory), e.g. `'./dataset/squad'` or `'./dataset/squad/squad.py'` - a dataset identifier on the Hugging Face Hub (list all available datasets and ids with [`huggingface_hub.list_datasets`]), e.g. `'squad'`, `'glue'` or `'openai/webtext'` revision (`Union[str, datasets.Version]`, *optional*): If specified, the dataset module will be loaded from the datasets repository at this version. By default: - it is set to the local version of the lib. - it will also try to load it from the main branch if it's not available at the local version of the lib. Specifying a version that is different from your local version of the lib might cause compatibility issues. download_config ([`DownloadConfig`], *optional*): Specific download configuration parameters. download_mode ([`DownloadMode`] or `str`, defaults to `REUSE_DATASET_IF_EXISTS`): Download/generate mode. dynamic_modules_path (`str`, defaults to `~/.cache/huggingface/modules/datasets_modules`): Optional path to the directory in which the dynamic modules are saved. It must have been initialized with `init_dynamic_modules`. By default the datasets are stored inside the `datasets_modules` module. data_files (`Union[Dict, List, str]`, *optional*): Defining the data_files of the dataset configuration. **download_kwargs (additional keyword arguments): Optional attributes for [`DownloadConfig`] which will override the attributes in `download_config` if supplied, for example `token`. Example: ```py >>> from datasets import get_dataset_config_names >>> get_dataset_config_names("glue") ['cola', 'sst2', 'mrpc', 'qqp', 'stsb', 'mnli', 'mnli_mismatched', 'mnli_matched', 'qnli', 'rte', 'wnli', 'ax'] ``` """
/usr/src/app/target_test_cases/failed_tests_get_dataset_config_names.txt
def get_dataset_config_names( path: str, revision: Optional[Union[str, Version]] = None, download_config: Optional[DownloadConfig] = None, download_mode: Optional[Union[DownloadMode, str]] = None, dynamic_modules_path: Optional[str] = None, data_files: Optional[Union[Dict, List, str]] = None, **download_kwargs, ): """Get the list of available config names for a particular dataset. Args: path (`str`): path to the dataset processing script with the dataset builder. Can be either: - a local path to processing script or the directory containing the script (if the script has the same name as the directory), e.g. `'./dataset/squad'` or `'./dataset/squad/squad.py'` - a dataset identifier on the Hugging Face Hub (list all available datasets and ids with [`huggingface_hub.list_datasets`]), e.g. `'squad'`, `'glue'` or `'openai/webtext'` revision (`Union[str, datasets.Version]`, *optional*): If specified, the dataset module will be loaded from the datasets repository at this version. By default: - it is set to the local version of the lib. - it will also try to load it from the main branch if it's not available at the local version of the lib. Specifying a version that is different from your local version of the lib might cause compatibility issues. download_config ([`DownloadConfig`], *optional*): Specific download configuration parameters. download_mode ([`DownloadMode`] or `str`, defaults to `REUSE_DATASET_IF_EXISTS`): Download/generate mode. dynamic_modules_path (`str`, defaults to `~/.cache/huggingface/modules/datasets_modules`): Optional path to the directory in which the dynamic modules are saved. It must have been initialized with `init_dynamic_modules`. By default the datasets are stored inside the `datasets_modules` module. data_files (`Union[Dict, List, str]`, *optional*): Defining the data_files of the dataset configuration. **download_kwargs (additional keyword arguments): Optional attributes for [`DownloadConfig`] which will override the attributes in `download_config` if supplied, for example `token`. Example: ```py >>> from datasets import get_dataset_config_names >>> get_dataset_config_names("glue") ['cola', 'sst2', 'mrpc', 'qqp', 'stsb', 'mnli', 'mnli_mismatched', 'mnli_matched', 'qnli', 'rte', 'wnli', 'ax'] ``` """ dataset_module = dataset_module_factory( path, revision=revision, download_config=download_config, download_mode=download_mode, dynamic_modules_path=dynamic_modules_path, data_files=data_files, **download_kwargs, ) builder_cls = get_dataset_builder_class(dataset_module, dataset_name=os.path.basename(path)) return list(builder_cls.builder_configs.keys()) or [ dataset_module.builder_kwargs.get("config_name", builder_cls.DEFAULT_CONFIG_NAME or "default") ]
get_dataset_config_names
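A complementary sketch of the fallback behavior implemented above: when the builder defines no named configurations, a single default name is returned.

```py
from datasets import get_dataset_config_names

# Datasets with explicit configurations list them all.
print(get_dataset_config_names("glue"))  # e.g. ['cola', 'sst2', 'mrpc', ...]

# Data-only repositories without named configurations fall back to a single
# entry, typically "default" (or the builder's DEFAULT_CONFIG_NAME).
print(get_dataset_config_names("rotten_tomatoes"))  # e.g. ['default']
```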
datasets
44
src/datasets/inspect.py
def get_dataset_default_config_name( path: str, revision: Optional[Union[str, Version]] = None, download_config: Optional[DownloadConfig] = None, download_mode: Optional[Union[DownloadMode, str]] = None, dynamic_modules_path: Optional[str] = None, data_files: Optional[Union[Dict, List, str]] = None, **download_kwargs, ) -> Optional[str]: """Get the default config name for a particular dataset. Can return None only if the dataset has multiple configurations and no default configuration. Args: path (`str`): path to the dataset processing script with the dataset builder. Can be either: - a local path to processing script or the directory containing the script (if the script has the same name as the directory), e.g. `'./dataset/squad'` or `'./dataset/squad/squad.py'` - a dataset identifier on the Hugging Face Hub (list all available datasets and ids with [`huggingface_hub.list_datasets`]), e.g. `'squad'`, `'glue'` or `'openai/webtext'` revision (`Union[str, datasets.Version]`, *optional*): If specified, the dataset module will be loaded from the datasets repository at this version. By default: - it is set to the local version of the lib. - it will also try to load it from the main branch if it's not available at the local version of the lib. Specifying a version that is different from your local version of the lib might cause compatibility issues. download_config ([`DownloadConfig`], *optional*): Specific download configuration parameters. download_mode ([`DownloadMode`] or `str`, defaults to `REUSE_DATASET_IF_EXISTS`): Download/generate mode. dynamic_modules_path (`str`, defaults to `~/.cache/huggingface/modules/datasets_modules`): Optional path to the directory in which the dynamic modules are saved. It must have been initialized with `init_dynamic_modules`. By default the datasets are stored inside the `datasets_modules` module. data_files (`Union[Dict, List, str]`, *optional*): Defining the data_files of the dataset configuration. **download_kwargs (additional keyword arguments): Optional attributes for [`DownloadConfig`] which will override the attributes in `download_config` if supplied, for example `token`. Returns: Optional[str]: the default config name if there is one Example: ```py >>> from datasets import get_dataset_default_config_name >>> get_dataset_default_config_name("openbookqa") 'main' ``` """
/usr/src/app/target_test_cases/failed_tests_get_dataset_default_config_name.txt
def get_dataset_default_config_name( path: str, revision: Optional[Union[str, Version]] = None, download_config: Optional[DownloadConfig] = None, download_mode: Optional[Union[DownloadMode, str]] = None, dynamic_modules_path: Optional[str] = None, data_files: Optional[Union[Dict, List, str]] = None, **download_kwargs, ) -> Optional[str]: """Get the default config name for a particular dataset. Can return None only if the dataset has multiple configurations and no default configuration. Args: path (`str`): path to the dataset processing script with the dataset builder. Can be either: - a local path to processing script or the directory containing the script (if the script has the same name as the directory), e.g. `'./dataset/squad'` or `'./dataset/squad/squad.py'` - a dataset identifier on the Hugging Face Hub (list all available datasets and ids with [`huggingface_hub.list_datasets`]), e.g. `'squad'`, `'glue'` or `'openai/webtext'` revision (`Union[str, datasets.Version]`, *optional*): If specified, the dataset module will be loaded from the datasets repository at this version. By default: - it is set to the local version of the lib. - it will also try to load it from the main branch if it's not available at the local version of the lib. Specifying a version that is different from your local version of the lib might cause compatibility issues. download_config ([`DownloadConfig`], *optional*): Specific download configuration parameters. download_mode ([`DownloadMode`] or `str`, defaults to `REUSE_DATASET_IF_EXISTS`): Download/generate mode. dynamic_modules_path (`str`, defaults to `~/.cache/huggingface/modules/datasets_modules`): Optional path to the directory in which the dynamic modules are saved. It must have been initialized with `init_dynamic_modules`. By default the datasets are stored inside the `datasets_modules` module. data_files (`Union[Dict, List, str]`, *optional*): Defining the data_files of the dataset configuration. **download_kwargs (additional keyword arguments): Optional attributes for [`DownloadConfig`] which will override the attributes in `download_config` if supplied, for example `token`. Returns: Optional[str]: the default config name if there is one Example: ```py >>> from datasets import get_dataset_default_config_name >>> get_dataset_default_config_name("openbookqa") 'main' ``` """ dataset_module = dataset_module_factory( path, revision=revision, download_config=download_config, download_mode=download_mode, dynamic_modules_path=dynamic_modules_path, data_files=data_files, **download_kwargs, ) builder_cls = get_dataset_builder_class(dataset_module, dataset_name=os.path.basename(path)) builder_configs = list(builder_cls.builder_configs.keys()) if builder_configs: default_config_name = builder_configs[0] if len(builder_configs) == 1 else None else: default_config_name = "default" return builder_cls.DEFAULT_CONFIG_NAME or default_config_name
get_dataset_default_config_name
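A small sketch combining the two helpers above to pick a configuration; the repository id is a placeholder.

```py
from datasets import get_dataset_config_names, get_dataset_default_config_name

path = "username/my_dataset"  # placeholder repository id
configs = get_dataset_config_names(path)
default = get_dataset_default_config_name(path)

# `default` is None only when the dataset has several configurations and none
# of them is marked as the default, so fall back to the first listed config.
config_name = default or configs[0]
```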
datasets
45
src/datasets/inspect.py
def get_dataset_infos( path: str, data_files: Optional[Union[Dict, List, str]] = None, download_config: Optional[DownloadConfig] = None, download_mode: Optional[Union[DownloadMode, str]] = None, revision: Optional[Union[str, Version]] = None, token: Optional[Union[bool, str]] = None, **config_kwargs, ): """Get the meta information about a dataset, returned as a dict mapping config name to DatasetInfoDict. Args: path (`str`): path to the dataset processing script with the dataset builder. Can be either: - a local path to processing script or the directory containing the script (if the script has the same name as the directory), e.g. `'./dataset/squad'` or `'./dataset/squad/squad.py'` - a dataset identifier on the Hugging Face Hub (list all available datasets and ids with [`huggingface_hub.list_datasets`]), e.g. `'squad'`, `'glue'` or``'openai/webtext'` revision (`Union[str, datasets.Version]`, *optional*): If specified, the dataset module will be loaded from the datasets repository at this version. By default: - it is set to the local version of the lib. - it will also try to load it from the main branch if it's not available at the local version of the lib. Specifying a version that is different from your local version of the lib might cause compatibility issues. download_config ([`DownloadConfig`], *optional*): Specific download configuration parameters. download_mode ([`DownloadMode`] or `str`, defaults to `REUSE_DATASET_IF_EXISTS`): Download/generate mode. data_files (`Union[Dict, List, str]`, *optional*): Defining the data_files of the dataset configuration. token (`str` or `bool`, *optional*): Optional string or boolean to use as Bearer token for remote files on the Datasets Hub. If `True`, or not specified, will get token from `"~/.huggingface"`. **config_kwargs (additional keyword arguments): Optional attributes for builder class which will override the attributes if supplied. Example: ```py >>> from datasets import get_dataset_infos >>> get_dataset_infos('rotten_tomatoes') {'default': DatasetInfo(description="Movie Review Dataset.\nThis is a dataset of containing 5,331 positive and 5,331 negative processed\nsentences from Rotten Tomatoes movie reviews...), ...} ``` """
/usr/src/app/target_test_cases/failed_tests_get_dataset_infos.txt
def get_dataset_infos( path: str, data_files: Optional[Union[Dict, List, str]] = None, download_config: Optional[DownloadConfig] = None, download_mode: Optional[Union[DownloadMode, str]] = None, revision: Optional[Union[str, Version]] = None, token: Optional[Union[bool, str]] = None, **config_kwargs, ): """Get the meta information about a dataset, returned as a dict mapping config name to DatasetInfoDict. Args: path (`str`): path to the dataset processing script with the dataset builder. Can be either: - a local path to processing script or the directory containing the script (if the script has the same name as the directory), e.g. `'./dataset/squad'` or `'./dataset/squad/squad.py'` - a dataset identifier on the Hugging Face Hub (list all available datasets and ids with [`huggingface_hub.list_datasets`]), e.g. `'squad'`, `'glue'` or``'openai/webtext'` revision (`Union[str, datasets.Version]`, *optional*): If specified, the dataset module will be loaded from the datasets repository at this version. By default: - it is set to the local version of the lib. - it will also try to load it from the main branch if it's not available at the local version of the lib. Specifying a version that is different from your local version of the lib might cause compatibility issues. download_config ([`DownloadConfig`], *optional*): Specific download configuration parameters. download_mode ([`DownloadMode`] or `str`, defaults to `REUSE_DATASET_IF_EXISTS`): Download/generate mode. data_files (`Union[Dict, List, str]`, *optional*): Defining the data_files of the dataset configuration. token (`str` or `bool`, *optional*): Optional string or boolean to use as Bearer token for remote files on the Datasets Hub. If `True`, or not specified, will get token from `"~/.huggingface"`. **config_kwargs (additional keyword arguments): Optional attributes for builder class which will override the attributes if supplied. Example: ```py >>> from datasets import get_dataset_infos >>> get_dataset_infos('rotten_tomatoes') {'default': DatasetInfo(description="Movie Review Dataset.\nThis is a dataset of containing 5,331 positive and 5,331 negative processed\nsentences from Rotten Tomatoes movie reviews...), ...} ``` """ config_names = get_dataset_config_names( path=path, revision=revision, download_config=download_config, download_mode=download_mode, data_files=data_files, token=token, ) return { config_name: get_dataset_config_info( path=path, config_name=config_name, data_files=data_files, download_config=download_config, download_mode=download_mode, revision=revision, token=token, **config_kwargs, ) for config_name in config_names }
get_dataset_infos
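A short sketch iterating over the mapping returned by the function above, which is built by calling `get_dataset_config_info` once per config name.

```py
from datasets import get_dataset_infos

infos = get_dataset_infos("rotten_tomatoes")
for config_name, info in infos.items():
    # Each value is a DatasetInfo: features, splits, sizes, citation, ...
    print(config_name, list(info.splits or {}))
```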
datasets
46
src/datasets/inspect.py
def get_dataset_split_names( path: str, config_name: Optional[str] = None, data_files: Optional[Union[str, Sequence[str], Mapping[str, Union[str, Sequence[str]]]]] = None, download_config: Optional[DownloadConfig] = None, download_mode: Optional[Union[DownloadMode, str]] = None, revision: Optional[Union[str, Version]] = None, token: Optional[Union[bool, str]] = None, **config_kwargs, ): """Get the list of available splits for a particular config and dataset. Args: path (`str`): path to the dataset processing script with the dataset builder. Can be either: - a local path to processing script or the directory containing the script (if the script has the same name as the directory), e.g. `'./dataset/squad'` or `'./dataset/squad/squad.py'` - a dataset identifier on the Hugging Face Hub (list all available datasets and ids with [`huggingface_hub.list_datasets`]), e.g. `'squad'`, `'glue'` or `'openai/webtext'` config_name (`str`, *optional*): Defining the name of the dataset configuration. data_files (`str` or `Sequence` or `Mapping`, *optional*): Path(s) to source data file(s). download_config ([`DownloadConfig`], *optional*): Specific download configuration parameters. download_mode ([`DownloadMode`] or `str`, defaults to `REUSE_DATASET_IF_EXISTS`): Download/generate mode. revision ([`Version`] or `str`, *optional*): Version of the dataset script to load. As datasets have their own git repository on the Datasets Hub, the default version "main" corresponds to their "main" branch. You can specify a different version than the default "main" by using a commit SHA or a git tag of the dataset repository. token (`str` or `bool`, *optional*): Optional string or boolean to use as Bearer token for remote files on the Datasets Hub. If `True`, or not specified, will get token from `"~/.huggingface"`. **config_kwargs (additional keyword arguments): Optional attributes for builder class which will override the attributes if supplied. Example: ```py >>> from datasets import get_dataset_split_names >>> get_dataset_split_names('rotten_tomatoes') ['train', 'validation', 'test'] ``` """
/usr/src/app/target_test_cases/failed_tests_get_dataset_split_names.txt
def get_dataset_split_names( path: str, config_name: Optional[str] = None, data_files: Optional[Union[str, Sequence[str], Mapping[str, Union[str, Sequence[str]]]]] = None, download_config: Optional[DownloadConfig] = None, download_mode: Optional[Union[DownloadMode, str]] = None, revision: Optional[Union[str, Version]] = None, token: Optional[Union[bool, str]] = None, **config_kwargs, ): """Get the list of available splits for a particular config and dataset. Args: path (`str`): path to the dataset processing script with the dataset builder. Can be either: - a local path to processing script or the directory containing the script (if the script has the same name as the directory), e.g. `'./dataset/squad'` or `'./dataset/squad/squad.py'` - a dataset identifier on the Hugging Face Hub (list all available datasets and ids with [`huggingface_hub.list_datasets`]), e.g. `'squad'`, `'glue'` or `'openai/webtext'` config_name (`str`, *optional*): Defining the name of the dataset configuration. data_files (`str` or `Sequence` or `Mapping`, *optional*): Path(s) to source data file(s). download_config ([`DownloadConfig`], *optional*): Specific download configuration parameters. download_mode ([`DownloadMode`] or `str`, defaults to `REUSE_DATASET_IF_EXISTS`): Download/generate mode. revision ([`Version`] or `str`, *optional*): Version of the dataset script to load. As datasets have their own git repository on the Datasets Hub, the default version "main" corresponds to their "main" branch. You can specify a different version than the default "main" by using a commit SHA or a git tag of the dataset repository. token (`str` or `bool`, *optional*): Optional string or boolean to use as Bearer token for remote files on the Datasets Hub. If `True`, or not specified, will get token from `"~/.huggingface"`. **config_kwargs (additional keyword arguments): Optional attributes for builder class which will override the attributes if supplied. Example: ```py >>> from datasets import get_dataset_split_names >>> get_dataset_split_names('rotten_tomatoes') ['train', 'validation', 'test'] ``` """ info = get_dataset_config_info( path, config_name=config_name, data_files=data_files, download_config=download_config, download_mode=download_mode, revision=revision, token=token, **config_kwargs, ) return list(info.splits.keys())
get_dataset_split_names
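A brief complementary example showing the `config_name` argument, since multi-configuration datasets need it to resolve their splits.

```py
from datasets import get_dataset_split_names

# For multi-configuration datasets, pass the configuration explicitly.
print(get_dataset_split_names("glue", config_name="mrpc"))  # e.g. ['train', 'validation', 'test']
```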
datasets
47
src/datasets/utils/file_utils.py
def get_from_cache(
    url,
    cache_dir=None,
    force_download=False,
    user_agent=None,
    use_etag=True,
    token=None,
    storage_options=None,
    download_desc=None,
    disable_tqdm=False,
) -> str:
    """
    Given a URL, look for the corresponding file in the local cache.
    If it's not there, download it. Then return the path to the cached file.

    Return:
        Local path (string)

    Raises:
        FileNotFoundError: in case of non-recoverable file
            (non-existent or no cache on disk)
        ConnectionError: in case of unreachable url
            and no cache on disk
    """
/usr/src/app/target_test_cases/failed_tests_get_from_cache.txt
def get_from_cache(
    url,
    cache_dir=None,
    force_download=False,
    user_agent=None,
    use_etag=True,
    token=None,
    storage_options=None,
    download_desc=None,
    disable_tqdm=False,
) -> str:
    """
    Given a URL, look for the corresponding file in the local cache.
    If it's not there, download it. Then return the path to the cached file.

    Return:
        Local path (string)

    Raises:
        FileNotFoundError: in case of non-recoverable file
            (non-existent or no cache on disk)
        ConnectionError: in case of unreachable url
            and no cache on disk
    """
    if storage_options is None:
        storage_options = {}
    if cache_dir is None:
        cache_dir = config.HF_DATASETS_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    os.makedirs(cache_dir, exist_ok=True)

    response = None
    etag = None

    # Try a first time to find the file on the local file system without eTag (None);
    # if we don't ask for 'force_download' then we spare a request
    filename = hash_url_to_filename(url, etag=None)
    cache_path = os.path.join(cache_dir, filename)

    if os.path.exists(cache_path) and not force_download and not use_etag:
        return cache_path

    # Prepare headers for authentication
    headers = get_authentication_headers_for_url(url, token=token)
    if user_agent is not None:
        headers["user-agent"] = user_agent

    response = fsspec_head(url, storage_options=storage_options)
    etag = (response.get("ETag", None) or response.get("etag", None)) if use_etag else None

    # Try a second time
    filename = hash_url_to_filename(url, etag)
    cache_path = os.path.join(cache_dir, filename)

    if os.path.exists(cache_path) and not force_download:
        return cache_path

    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + ".lock"
    with FileLock(lock_path):
        # Retry in case previously locked processes just enter after the preceding process releases the lock
        if os.path.exists(cache_path) and not force_download:
            return cache_path

        incomplete_path = cache_path + ".incomplete"

        @contextmanager
        def temp_file_manager(mode="w+b"):
            with open(incomplete_path, mode) as f:
                yield f

        # Download to temporary file, then copy to cache path once finished.
        # Otherwise, you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            logger.info(f"{url} not found in cache or force_download set to True, downloading to {temp_file.name}")
            # GET file object
            fsspec_get(url, temp_file, storage_options=storage_options, desc=download_desc, disable_tqdm=disable_tqdm)

        logger.info(f"storing {url} in cache at {cache_path}")
        shutil.move(temp_file.name, cache_path)
        umask = os.umask(0o666)
        os.umask(umask)
        os.chmod(cache_path, 0o666 & ~umask)

        logger.info(f"creating metadata file for {cache_path}")
        meta = {"url": url, "etag": etag}
        meta_path = cache_path + ".json"
        with open(meta_path, "w", encoding="utf-8") as meta_file:
            json.dump(meta, meta_file)

    return cache_path
get_from_cache
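A usage sketch for the record above. `get_from_cache` is an internal helper rather than public API, so the call pattern below is an assumption for illustration; the URL and cache directory are placeholders.

```py
# Sketch only: internal helper, placeholder URL and cache_dir.
from datasets.utils.file_utils import get_from_cache

local_path = get_from_cache(
    "https://example.com/data/train.csv",
    cache_dir="/tmp/hf_cache",
    use_etag=False,  # reuse any cached copy keyed by URL hash without an ETag check
)
print(local_path)
```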
datasets
48
src/datasets/io/parquet.py
def get_writer_batch_size(features: Features) -> Optional[int]:
    """
    Get the writer_batch_size that defines the maximum row group size in the parquet files.
    The default in `datasets` is 1,000 but we lower it to 100 for image datasets.
    This allows optimizing random access to a parquet file, since accessing one row requires reading its entire row group.

    This can be improved to get an optimized size for querying/iterating,
    but at least it matches the dataset viewer expectations on HF.

    Args:
        features (`datasets.Features`): Features of the dataset.
    Returns:
        writer_batch_size (`Optional[int]`):
            Writer batch size to pass to a dataset builder.
            If `None`, then it will use the `datasets` default.
    """
/usr/src/app/target_test_cases/failed_tests_get_writer_batch_size.txt
def get_writer_batch_size(features: Features) -> Optional[int]:
    """
    Get the writer_batch_size that defines the maximum row group size in the parquet files.
    The default in `datasets` is 1,000 but we lower it to 100 for image datasets.
    This allows optimizing random access to a parquet file, since accessing one row requires reading its entire row group.

    This can be improved to get an optimized size for querying/iterating,
    but at least it matches the dataset viewer expectations on HF.

    Args:
        features (`datasets.Features`): Features of the dataset.
    Returns:
        writer_batch_size (`Optional[int]`):
            Writer batch size to pass to a dataset builder.
            If `None`, then it will use the `datasets` default.
    """
    batch_size = np.inf

    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    _visit(features, set_batch_size)

    return None if batch_size is np.inf else batch_size
get_writer_batch_size
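A small sketch of the helper above. It lives in `datasets.io.parquet` (see the module path in this record) and is not part of the public API, so treat the import as an implementation detail.

```py
from datasets import Features, Image, Value
from datasets.io.parquet import get_writer_batch_size

# An Image column caps the parquet row group size so that reading one row
# does not require decoding a huge row group.
print(get_writer_batch_size(Features({"image": Image(), "label": Value("int64")})))

# No image/audio/binary columns -> None, i.e. keep the `datasets` default.
print(get_writer_batch_size(Features({"text": Value("string")})))
```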
datasets
49
src/datasets/combine.py
def interleave_datasets( datasets: List[DatasetType], probabilities: Optional[List[float]] = None, seed: Optional[int] = None, info: Optional[DatasetInfo] = None, split: Optional[NamedSplit] = None, stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted", ) -> DatasetType: """ Interleave several datasets (sources) into a single dataset. The new dataset is constructed by alternating between the sources to get the examples. You can use this function on a list of [`Dataset`] objects, or on a list of [`IterableDataset`] objects. - If `probabilities` is `None` (default) the new dataset is constructed by cycling between each source to get the examples. - If `probabilities` is not `None`, the new dataset is constructed by getting examples from a random source at a time according to the provided probabilities. The resulting dataset ends when one of the source datasets runs out of examples except when `oversampling` is `True`, in which case, the resulting dataset ends when all datasets have ran out of examples at least one time. Note for iterable datasets: In a distributed setup or in PyTorch DataLoader workers, the stopping strategy is applied per process. Therefore the "first_exhausted" strategy on an sharded iterable dataset can generate less samples in total (up to 1 missing sample per subdataset per worker). Args: datasets (`List[Dataset]` or `List[IterableDataset]`): List of datasets to interleave. probabilities (`List[float]`, *optional*, defaults to `None`): If specified, the new dataset is constructed by sampling examples from one source at a time according to these probabilities. seed (`int`, *optional*, defaults to `None`): The random seed used to choose a source for each example. info ([`DatasetInfo`], *optional*): Dataset information, like description, citation, etc. <Added version="2.4.0"/> split ([`NamedSplit`], *optional*): Name of the dataset split. <Added version="2.4.0"/> stopping_strategy (`str`, defaults to `first_exhausted`): Two strategies are proposed right now, `first_exhausted` and `all_exhausted`. By default, `first_exhausted` is an undersampling strategy, i.e the dataset construction is stopped as soon as one dataset has ran out of samples. If the strategy is `all_exhausted`, we use an oversampling strategy, i.e the dataset construction is stopped as soon as every samples of every dataset has been added at least once. Note that if the strategy is `all_exhausted`, the interleaved dataset size can get enormous: - with no probabilities, the resulting dataset will have `max_length_datasets*nb_dataset` samples. - with given probabilities, the resulting dataset will have more samples if some datasets have really low probability of visiting. Returns: [`Dataset`] or [`IterableDataset`]: Return type depends on the input `datasets` parameter. `Dataset` if the input is a list of `Dataset`, `IterableDataset` if the input is a list of `IterableDataset`. 
Example: For regular datasets (map-style): ```python >>> from datasets import Dataset, interleave_datasets >>> d1 = Dataset.from_dict({"a": [0, 1, 2]}) >>> d2 = Dataset.from_dict({"a": [10, 11, 12]}) >>> d3 = Dataset.from_dict({"a": [20, 21, 22]}) >>> dataset = interleave_datasets([d1, d2, d3], probabilities=[0.7, 0.2, 0.1], seed=42, stopping_strategy="all_exhausted") >>> dataset["a"] [10, 0, 11, 1, 2, 20, 12, 10, 0, 1, 2, 21, 0, 11, 1, 2, 0, 1, 12, 2, 10, 0, 22] >>> dataset = interleave_datasets([d1, d2, d3], probabilities=[0.7, 0.2, 0.1], seed=42) >>> dataset["a"] [10, 0, 11, 1, 2] >>> dataset = interleave_datasets([d1, d2, d3]) >>> dataset["a"] [0, 10, 20, 1, 11, 21, 2, 12, 22] >>> dataset = interleave_datasets([d1, d2, d3], stopping_strategy="all_exhausted") >>> dataset["a"] [0, 10, 20, 1, 11, 21, 2, 12, 22] >>> d1 = Dataset.from_dict({"a": [0, 1, 2]}) >>> d2 = Dataset.from_dict({"a": [10, 11, 12, 13]}) >>> d3 = Dataset.from_dict({"a": [20, 21, 22, 23, 24]}) >>> dataset = interleave_datasets([d1, d2, d3]) >>> dataset["a"] [0, 10, 20, 1, 11, 21, 2, 12, 22] >>> dataset = interleave_datasets([d1, d2, d3], stopping_strategy="all_exhausted") >>> dataset["a"] [0, 10, 20, 1, 11, 21, 2, 12, 22, 0, 13, 23, 1, 10, 24] >>> dataset = interleave_datasets([d1, d2, d3], probabilities=[0.7, 0.2, 0.1], seed=42) >>> dataset["a"] [10, 0, 11, 1, 2] >>> dataset = interleave_datasets([d1, d2, d3], probabilities=[0.7, 0.2, 0.1], seed=42, stopping_strategy="all_exhausted") >>> dataset["a"] [10, 0, 11, 1, 2, 20, 12, 13, ..., 0, 1, 2, 0, 24] For datasets in streaming mode (iterable): >>> from datasets import load_dataset, interleave_datasets >>> d1 = load_dataset("oscar", "unshuffled_deduplicated_en", split="train", streaming=True) >>> d2 = load_dataset("oscar", "unshuffled_deduplicated_fr", split="train", streaming=True) >>> dataset = interleave_datasets([d1, d2]) >>> iterator = iter(dataset) >>> next(iterator) {'text': 'Mtendere Village was inspired by the vision...} >>> next(iterator) {'text': "Média de débat d'idées, de culture...} ``` """
/usr/src/app/target_test_cases/failed_tests_interleave_datasets.txt
def interleave_datasets( datasets: List[DatasetType], probabilities: Optional[List[float]] = None, seed: Optional[int] = None, info: Optional[DatasetInfo] = None, split: Optional[NamedSplit] = None, stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted", ) -> DatasetType: """ Interleave several datasets (sources) into a single dataset. The new dataset is constructed by alternating between the sources to get the examples. You can use this function on a list of [`Dataset`] objects, or on a list of [`IterableDataset`] objects. - If `probabilities` is `None` (default) the new dataset is constructed by cycling between each source to get the examples. - If `probabilities` is not `None`, the new dataset is constructed by getting examples from a random source at a time according to the provided probabilities. The resulting dataset ends when one of the source datasets runs out of examples except when `oversampling` is `True`, in which case, the resulting dataset ends when all datasets have ran out of examples at least one time. Note for iterable datasets: In a distributed setup or in PyTorch DataLoader workers, the stopping strategy is applied per process. Therefore the "first_exhausted" strategy on an sharded iterable dataset can generate less samples in total (up to 1 missing sample per subdataset per worker). Args: datasets (`List[Dataset]` or `List[IterableDataset]`): List of datasets to interleave. probabilities (`List[float]`, *optional*, defaults to `None`): If specified, the new dataset is constructed by sampling examples from one source at a time according to these probabilities. seed (`int`, *optional*, defaults to `None`): The random seed used to choose a source for each example. info ([`DatasetInfo`], *optional*): Dataset information, like description, citation, etc. <Added version="2.4.0"/> split ([`NamedSplit`], *optional*): Name of the dataset split. <Added version="2.4.0"/> stopping_strategy (`str`, defaults to `first_exhausted`): Two strategies are proposed right now, `first_exhausted` and `all_exhausted`. By default, `first_exhausted` is an undersampling strategy, i.e the dataset construction is stopped as soon as one dataset has ran out of samples. If the strategy is `all_exhausted`, we use an oversampling strategy, i.e the dataset construction is stopped as soon as every samples of every dataset has been added at least once. Note that if the strategy is `all_exhausted`, the interleaved dataset size can get enormous: - with no probabilities, the resulting dataset will have `max_length_datasets*nb_dataset` samples. - with given probabilities, the resulting dataset will have more samples if some datasets have really low probability of visiting. Returns: [`Dataset`] or [`IterableDataset`]: Return type depends on the input `datasets` parameter. `Dataset` if the input is a list of `Dataset`, `IterableDataset` if the input is a list of `IterableDataset`. 
Example: For regular datasets (map-style): ```python >>> from datasets import Dataset, interleave_datasets >>> d1 = Dataset.from_dict({"a": [0, 1, 2]}) >>> d2 = Dataset.from_dict({"a": [10, 11, 12]}) >>> d3 = Dataset.from_dict({"a": [20, 21, 22]}) >>> dataset = interleave_datasets([d1, d2, d3], probabilities=[0.7, 0.2, 0.1], seed=42, stopping_strategy="all_exhausted") >>> dataset["a"] [10, 0, 11, 1, 2, 20, 12, 10, 0, 1, 2, 21, 0, 11, 1, 2, 0, 1, 12, 2, 10, 0, 22] >>> dataset = interleave_datasets([d1, d2, d3], probabilities=[0.7, 0.2, 0.1], seed=42) >>> dataset["a"] [10, 0, 11, 1, 2] >>> dataset = interleave_datasets([d1, d2, d3]) >>> dataset["a"] [0, 10, 20, 1, 11, 21, 2, 12, 22] >>> dataset = interleave_datasets([d1, d2, d3], stopping_strategy="all_exhausted") >>> dataset["a"] [0, 10, 20, 1, 11, 21, 2, 12, 22] >>> d1 = Dataset.from_dict({"a": [0, 1, 2]}) >>> d2 = Dataset.from_dict({"a": [10, 11, 12, 13]}) >>> d3 = Dataset.from_dict({"a": [20, 21, 22, 23, 24]}) >>> dataset = interleave_datasets([d1, d2, d3]) >>> dataset["a"] [0, 10, 20, 1, 11, 21, 2, 12, 22] >>> dataset = interleave_datasets([d1, d2, d3], stopping_strategy="all_exhausted") >>> dataset["a"] [0, 10, 20, 1, 11, 21, 2, 12, 22, 0, 13, 23, 1, 10, 24] >>> dataset = interleave_datasets([d1, d2, d3], probabilities=[0.7, 0.2, 0.1], seed=42) >>> dataset["a"] [10, 0, 11, 1, 2] >>> dataset = interleave_datasets([d1, d2, d3], probabilities=[0.7, 0.2, 0.1], seed=42, stopping_strategy="all_exhausted") >>> dataset["a"] [10, 0, 11, 1, 2, 20, 12, 13, ..., 0, 1, 2, 0, 24] For datasets in streaming mode (iterable): >>> from datasets import load_dataset, interleave_datasets >>> d1 = load_dataset("oscar", "unshuffled_deduplicated_en", split="train", streaming=True) >>> d2 = load_dataset("oscar", "unshuffled_deduplicated_fr", split="train", streaming=True) >>> dataset = interleave_datasets([d1, d2]) >>> iterator = iter(dataset) >>> next(iterator) {'text': 'Mtendere Village was inspired by the vision...} >>> next(iterator) {'text': "Média de débat d'idées, de culture...} ``` """ from .arrow_dataset import Dataset from .iterable_dataset import IterableDataset if not datasets: raise ValueError("Unable to interleave an empty list of datasets.") for i, dataset in enumerate(datasets): if not isinstance(dataset, (Dataset, IterableDataset)): if isinstance(dataset, (DatasetDict, IterableDatasetDict)): if not dataset: raise ValueError( f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} " "is an empty dataset dictionary." ) raise ValueError( f"Dataset at position {i} has at least one split: {list(dataset)}\n" f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']" ) raise ValueError( f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}." ) if i == 0: dataset_type, other_type = ( (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset) ) elif not isinstance(dataset, dataset_type): raise ValueError( f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." ) if stopping_strategy not in ["first_exhausted", "all_exhausted"]: raise ValueError(f"{stopping_strategy} is not supported. 
Please enter a valid stopping_strategy.") if dataset_type is Dataset: return _interleave_map_style_datasets( datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy ) else: return _interleave_iterable_datasets( datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy )
interleave_datasets
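A complementary sketch, not taken from the record above, showing one way to derive sampling probabilities from dataset sizes when mixing map-style datasets.

```py
from datasets import Dataset, interleave_datasets

d1 = Dataset.from_dict({"a": list(range(3))})
d2 = Dataset.from_dict({"a": list(range(100, 110))})

# Weight each source by its size so the larger dataset is sampled more often.
sizes = [len(d1), len(d2)]
probabilities = [size / sum(sizes) for size in sizes]

mixed = interleave_datasets(
    [d1, d2],
    probabilities=probabilities,
    seed=0,
    stopping_strategy="all_exhausted",  # oversample until every example has been seen
)
print(mixed["a"])
```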
datasets
50
src/datasets/load.py
def load_dataset( path: str, name: Optional[str] = None, data_dir: Optional[str] = None, data_files: Optional[Union[str, Sequence[str], Mapping[str, Union[str, Sequence[str]]]]] = None, split: Optional[Union[str, Split]] = None, cache_dir: Optional[str] = None, features: Optional[Features] = None, download_config: Optional[DownloadConfig] = None, download_mode: Optional[Union[DownloadMode, str]] = None, verification_mode: Optional[Union[VerificationMode, str]] = None, keep_in_memory: Optional[bool] = None, save_infos: bool = False, revision: Optional[Union[str, Version]] = None, token: Optional[Union[bool, str]] = None, streaming: bool = False, num_proc: Optional[int] = None, storage_options: Optional[Dict] = None, trust_remote_code: bool = None, **config_kwargs, ) -> Union[DatasetDict, Dataset, IterableDatasetDict, IterableDataset]: """Load a dataset from the Hugging Face Hub, or a local dataset. You can find the list of datasets on the [Hub](https://huggingface.co/datasets) or with [`huggingface_hub.list_datasets`]. A dataset is a directory that contains: - some data files in generic formats (JSON, CSV, Parquet, text, etc.). - and optionally a dataset script, if it requires some code to read the data files. This is used to load any kind of formats or structures. Note that dataset scripts can also download and read data files from anywhere - in case your data files already exist online. This function does the following under the hood: 1. Download and import in the library the dataset script from `path` if it's not already cached inside the library. If the dataset has no dataset script, then a generic dataset script is imported instead (JSON, CSV, Parquet, text, etc.) Dataset scripts are small python scripts that define dataset builders. They define the citation, info and format of the dataset, contain the path or URL to the original data files and the code to load examples from the original data files. You can find the complete list of datasets in the Datasets [Hub](https://huggingface.co/datasets). 2. Run the dataset script which will: * Download the dataset file from the original URL (see the script) if it's not already available locally or cached. * Process and cache the dataset in typed Arrow tables for caching. Arrow table are arbitrarily long, typed tables which can store nested objects and be mapped to numpy/pandas/python generic types. They can be directly accessed from disk, loaded in RAM or even streamed over the web. 3. Return a dataset built from the requested splits in `split` (default: all). It also allows to load a dataset from a local directory or a dataset repository on the Hugging Face Hub without dataset script. In this case, it automatically loads all the data files from the directory or the dataset repository. Args: path (`str`): Path or name of the dataset. Depending on `path`, the dataset builder that is used comes from a generic dataset script (JSON, CSV, Parquet, text etc.) or from the dataset script (a python file) inside the dataset directory. For local datasets: - if `path` is a local directory (containing data files only) -> load a generic dataset builder (csv, json, text etc.) based on the content of the directory e.g. `'./path/to/directory/with/my/csv/data'`. - if `path` is a local dataset script or a directory containing a local dataset script (if the script has the same name as the directory) -> load the dataset builder from the dataset script e.g. `'./dataset/squad'` or `'./dataset/squad/squad.py'`. 
For datasets on the Hugging Face Hub (list all available datasets with [`huggingface_hub.list_datasets`]) - if `path` is a dataset repository on the HF hub (containing data files only) -> load a generic dataset builder (csv, text etc.) based on the content of the repository e.g. `'username/dataset_name'`, a dataset repository on the HF hub containing your data files. - if `path` is a dataset repository on the HF hub with a dataset script (if the script has the same name as the directory) -> load the dataset builder from the dataset script in the dataset repository e.g. `glue`, `squad`, `'username/dataset_name'`, a dataset repository on the HF hub containing a dataset script `'dataset_name.py'`. name (`str`, *optional*): Defining the name of the dataset configuration. data_dir (`str`, *optional*): Defining the `data_dir` of the dataset configuration. If specified for the generic builders (csv, text etc.) or the Hub datasets and `data_files` is `None`, the behavior is equal to passing `os.path.join(data_dir, **)` as `data_files` to reference all the files in a directory. data_files (`str` or `Sequence` or `Mapping`, *optional*): Path(s) to source data file(s). split (`Split` or `str`): Which split of the data to load. If `None`, will return a `dict` with all splits (typically `datasets.Split.TRAIN` and `datasets.Split.TEST`). If given, will return a single Dataset. Splits can be combined and specified like in tensorflow-datasets. cache_dir (`str`, *optional*): Directory to read/write data. Defaults to `"~/.cache/huggingface/datasets"`. features (`Features`, *optional*): Set the features type to use for this dataset. download_config ([`DownloadConfig`], *optional*): Specific download configuration parameters. download_mode ([`DownloadMode`] or `str`, defaults to `REUSE_DATASET_IF_EXISTS`): Download/generate mode. verification_mode ([`VerificationMode`] or `str`, defaults to `BASIC_CHECKS`): Verification mode determining the checks to run on the downloaded/processed dataset information (checksums/size/splits/...). <Added version="2.9.1"/> keep_in_memory (`bool`, defaults to `None`): Whether to copy the dataset in-memory. If `None`, the dataset will not be copied in-memory unless explicitly enabled by setting `datasets.config.IN_MEMORY_MAX_SIZE` to nonzero. See more details in the [improve performance](../cache#improve-performance) section. save_infos (`bool`, defaults to `False`): Save the dataset information (checksums/size/splits/...). revision ([`Version`] or `str`, *optional*): Version of the dataset script to load. As datasets have their own git repository on the Datasets Hub, the default version "main" corresponds to their "main" branch. You can specify a different version than the default "main" by using a commit SHA or a git tag of the dataset repository. token (`str` or `bool`, *optional*): Optional string or boolean to use as Bearer token for remote files on the Datasets Hub. If `True`, or not specified, will get token from `"~/.huggingface"`. streaming (`bool`, defaults to `False`): If set to `True`, don't download the data files. Instead, it streams the data progressively while iterating on the dataset. An [`IterableDataset`] or [`IterableDatasetDict`] is returned instead in this case. Note that streaming works for datasets that use data formats that support being iterated over like txt, csv, jsonl for example. Json files may be downloaded completely. Also streaming from remote zip or gzip files is supported but other compressed formats like rar and xz are not yet supported. 
The tgz format doesn't allow streaming. num_proc (`int`, *optional*, defaults to `None`): Number of processes when downloading and generating the dataset locally. Multiprocessing is disabled by default. <Added version="2.7.0"/> storage_options (`dict`, *optional*, defaults to `None`): **Experimental**. Key/value pairs to be passed on to the dataset file-system backend, if any. <Added version="2.11.0"/> trust_remote_code (`bool`, defaults to `False`): Whether or not to allow for datasets defined on the Hub using a dataset script. This option should only be set to `True` for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine. <Added version="2.16.0"/> <Changed version="2.20.0"> `trust_remote_code` defaults to `False` if not specified. </Changed> **config_kwargs (additional keyword arguments): Keyword arguments to be passed to the `BuilderConfig` and used in the [`DatasetBuilder`]. Returns: [`Dataset`] or [`DatasetDict`]: - if `split` is not `None`: the dataset requested, - if `split` is `None`, a [`~datasets.DatasetDict`] with each split. or [`IterableDataset`] or [`IterableDatasetDict`]: if `streaming=True` - if `split` is not `None`, the dataset is requested - if `split` is `None`, a [`~datasets.streaming.IterableDatasetDict`] with each split. Example: Load a dataset from the Hugging Face Hub: ```py >>> from datasets import load_dataset >>> ds = load_dataset('rotten_tomatoes', split='train') # Map data files to splits >>> data_files = {'train': 'train.csv', 'test': 'test.csv'} >>> ds = load_dataset('namespace/your_dataset_name', data_files=data_files) ``` Load a local dataset: ```py # Load a CSV file >>> from datasets import load_dataset >>> ds = load_dataset('csv', data_files='path/to/local/my_dataset.csv') # Load a JSON file >>> from datasets import load_dataset >>> ds = load_dataset('json', data_files='path/to/local/my_dataset.json') # Load from a local loading script >>> from datasets import load_dataset >>> ds = load_dataset('path/to/local/loading_script/loading_script.py', split='train') ``` Load an [`~datasets.IterableDataset`]: ```py >>> from datasets import load_dataset >>> ds = load_dataset('rotten_tomatoes', split='train', streaming=True) ``` Load an image dataset with the `ImageFolder` dataset builder: ```py >>> from datasets import load_dataset >>> ds = load_dataset('imagefolder', data_dir='/path/to/images', split='train') ``` """
/usr/src/app/target_test_cases/failed_tests_load_dataset.txt
def load_dataset( path: str, name: Optional[str] = None, data_dir: Optional[str] = None, data_files: Optional[Union[str, Sequence[str], Mapping[str, Union[str, Sequence[str]]]]] = None, split: Optional[Union[str, Split]] = None, cache_dir: Optional[str] = None, features: Optional[Features] = None, download_config: Optional[DownloadConfig] = None, download_mode: Optional[Union[DownloadMode, str]] = None, verification_mode: Optional[Union[VerificationMode, str]] = None, keep_in_memory: Optional[bool] = None, save_infos: bool = False, revision: Optional[Union[str, Version]] = None, token: Optional[Union[bool, str]] = None, streaming: bool = False, num_proc: Optional[int] = None, storage_options: Optional[Dict] = None, trust_remote_code: bool = None, **config_kwargs, ) -> Union[DatasetDict, Dataset, IterableDatasetDict, IterableDataset]: """Load a dataset from the Hugging Face Hub, or a local dataset. You can find the list of datasets on the [Hub](https://huggingface.co/datasets) or with [`huggingface_hub.list_datasets`]. A dataset is a directory that contains: - some data files in generic formats (JSON, CSV, Parquet, text, etc.). - and optionally a dataset script, if it requires some code to read the data files. This is used to load any kind of formats or structures. Note that dataset scripts can also download and read data files from anywhere - in case your data files already exist online. This function does the following under the hood: 1. Download and import in the library the dataset script from `path` if it's not already cached inside the library. If the dataset has no dataset script, then a generic dataset script is imported instead (JSON, CSV, Parquet, text, etc.) Dataset scripts are small python scripts that define dataset builders. They define the citation, info and format of the dataset, contain the path or URL to the original data files and the code to load examples from the original data files. You can find the complete list of datasets in the Datasets [Hub](https://huggingface.co/datasets). 2. Run the dataset script which will: * Download the dataset file from the original URL (see the script) if it's not already available locally or cached. * Process and cache the dataset in typed Arrow tables for caching. Arrow table are arbitrarily long, typed tables which can store nested objects and be mapped to numpy/pandas/python generic types. They can be directly accessed from disk, loaded in RAM or even streamed over the web. 3. Return a dataset built from the requested splits in `split` (default: all). It also allows to load a dataset from a local directory or a dataset repository on the Hugging Face Hub without dataset script. In this case, it automatically loads all the data files from the directory or the dataset repository. Args: path (`str`): Path or name of the dataset. Depending on `path`, the dataset builder that is used comes from a generic dataset script (JSON, CSV, Parquet, text etc.) or from the dataset script (a python file) inside the dataset directory. For local datasets: - if `path` is a local directory (containing data files only) -> load a generic dataset builder (csv, json, text etc.) based on the content of the directory e.g. `'./path/to/directory/with/my/csv/data'`. - if `path` is a local dataset script or a directory containing a local dataset script (if the script has the same name as the directory) -> load the dataset builder from the dataset script e.g. `'./dataset/squad'` or `'./dataset/squad/squad.py'`. 
For datasets on the Hugging Face Hub (list all available datasets with [`huggingface_hub.list_datasets`]) - if `path` is a dataset repository on the HF hub (containing data files only) -> load a generic dataset builder (csv, text etc.) based on the content of the repository e.g. `'username/dataset_name'`, a dataset repository on the HF hub containing your data files. - if `path` is a dataset repository on the HF hub with a dataset script (if the script has the same name as the directory) -> load the dataset builder from the dataset script in the dataset repository e.g. `glue`, `squad`, `'username/dataset_name'`, a dataset repository on the HF hub containing a dataset script `'dataset_name.py'`. name (`str`, *optional*): Defining the name of the dataset configuration. data_dir (`str`, *optional*): Defining the `data_dir` of the dataset configuration. If specified for the generic builders (csv, text etc.) or the Hub datasets and `data_files` is `None`, the behavior is equal to passing `os.path.join(data_dir, **)` as `data_files` to reference all the files in a directory. data_files (`str` or `Sequence` or `Mapping`, *optional*): Path(s) to source data file(s). split (`Split` or `str`): Which split of the data to load. If `None`, will return a `dict` with all splits (typically `datasets.Split.TRAIN` and `datasets.Split.TEST`). If given, will return a single Dataset. Splits can be combined and specified like in tensorflow-datasets. cache_dir (`str`, *optional*): Directory to read/write data. Defaults to `"~/.cache/huggingface/datasets"`. features (`Features`, *optional*): Set the features type to use for this dataset. download_config ([`DownloadConfig`], *optional*): Specific download configuration parameters. download_mode ([`DownloadMode`] or `str`, defaults to `REUSE_DATASET_IF_EXISTS`): Download/generate mode. verification_mode ([`VerificationMode`] or `str`, defaults to `BASIC_CHECKS`): Verification mode determining the checks to run on the downloaded/processed dataset information (checksums/size/splits/...). <Added version="2.9.1"/> keep_in_memory (`bool`, defaults to `None`): Whether to copy the dataset in-memory. If `None`, the dataset will not be copied in-memory unless explicitly enabled by setting `datasets.config.IN_MEMORY_MAX_SIZE` to nonzero. See more details in the [improve performance](../cache#improve-performance) section. save_infos (`bool`, defaults to `False`): Save the dataset information (checksums/size/splits/...). revision ([`Version`] or `str`, *optional*): Version of the dataset script to load. As datasets have their own git repository on the Datasets Hub, the default version "main" corresponds to their "main" branch. You can specify a different version than the default "main" by using a commit SHA or a git tag of the dataset repository. token (`str` or `bool`, *optional*): Optional string or boolean to use as Bearer token for remote files on the Datasets Hub. If `True`, or not specified, will get token from `"~/.huggingface"`. streaming (`bool`, defaults to `False`): If set to `True`, don't download the data files. Instead, it streams the data progressively while iterating on the dataset. An [`IterableDataset`] or [`IterableDatasetDict`] is returned instead in this case. Note that streaming works for datasets that use data formats that support being iterated over like txt, csv, jsonl for example. Json files may be downloaded completely. Also streaming from remote zip or gzip files is supported but other compressed formats like rar and xz are not yet supported. 
The tgz format doesn't allow streaming. num_proc (`int`, *optional*, defaults to `None`): Number of processes when downloading and generating the dataset locally. Multiprocessing is disabled by default. <Added version="2.7.0"/> storage_options (`dict`, *optional*, defaults to `None`): **Experimental**. Key/value pairs to be passed on to the dataset file-system backend, if any. <Added version="2.11.0"/> trust_remote_code (`bool`, defaults to `False`): Whether or not to allow for datasets defined on the Hub using a dataset script. This option should only be set to `True` for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine. <Added version="2.16.0"/> <Changed version="2.20.0"> `trust_remote_code` defaults to `False` if not specified. </Changed> **config_kwargs (additional keyword arguments): Keyword arguments to be passed to the `BuilderConfig` and used in the [`DatasetBuilder`]. Returns: [`Dataset`] or [`DatasetDict`]: - if `split` is not `None`: the dataset requested, - if `split` is `None`, a [`~datasets.DatasetDict`] with each split. or [`IterableDataset`] or [`IterableDatasetDict`]: if `streaming=True` - if `split` is not `None`, the dataset is requested - if `split` is `None`, a [`~datasets.streaming.IterableDatasetDict`] with each split. Example: Load a dataset from the Hugging Face Hub: ```py >>> from datasets import load_dataset >>> ds = load_dataset('rotten_tomatoes', split='train') # Map data files to splits >>> data_files = {'train': 'train.csv', 'test': 'test.csv'} >>> ds = load_dataset('namespace/your_dataset_name', data_files=data_files) ``` Load a local dataset: ```py # Load a CSV file >>> from datasets import load_dataset >>> ds = load_dataset('csv', data_files='path/to/local/my_dataset.csv') # Load a JSON file >>> from datasets import load_dataset >>> ds = load_dataset('json', data_files='path/to/local/my_dataset.json') # Load from a local loading script >>> from datasets import load_dataset >>> ds = load_dataset('path/to/local/loading_script/loading_script.py', split='train') ``` Load an [`~datasets.IterableDataset`]: ```py >>> from datasets import load_dataset >>> ds = load_dataset('rotten_tomatoes', split='train', streaming=True) ``` Load an image dataset with the `ImageFolder` dataset builder: ```py >>> from datasets import load_dataset >>> ds = load_dataset('imagefolder', data_dir='/path/to/images', split='train') ``` """ if data_files is not None and not data_files: raise ValueError(f"Empty 'data_files': '{data_files}'. It should be either non-empty or None (default).") if Path(path, config.DATASET_STATE_JSON_FILENAME).exists(): raise ValueError( "You are trying to load a dataset that was saved using `save_to_disk`. " "Please use `load_from_disk` instead." ) if streaming and num_proc is not None: raise NotImplementedError( "Loading a streaming dataset in parallel with `num_proc` is not implemented. " "To parallelize streaming, you can wrap the dataset with a PyTorch DataLoader using `num_workers` > 1 instead." 
) download_mode = DownloadMode(download_mode or DownloadMode.REUSE_DATASET_IF_EXISTS) verification_mode = VerificationMode( (verification_mode or VerificationMode.BASIC_CHECKS) if not save_infos else VerificationMode.ALL_CHECKS ) # Create a dataset builder builder_instance = load_dataset_builder( path=path, name=name, data_dir=data_dir, data_files=data_files, cache_dir=cache_dir, features=features, download_config=download_config, download_mode=download_mode, revision=revision, token=token, storage_options=storage_options, trust_remote_code=trust_remote_code, _require_default_config_name=name is None, **config_kwargs, ) # Return iterable dataset in case of streaming if streaming: return builder_instance.as_streaming_dataset(split=split) # Download and prepare data builder_instance.download_and_prepare( download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, num_proc=num_proc, storage_options=storage_options, ) # Build dataset for splits keep_in_memory = ( keep_in_memory if keep_in_memory is not None else is_small_dataset(builder_instance.info.dataset_size) ) ds = builder_instance.as_dataset(split=split, verification_mode=verification_mode, in_memory=keep_in_memory) if save_infos: builder_instance._save_infos() return ds
load_dataset
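A brief usage sketch complementing the `load_dataset` record above; the split syntax and the CSV folder path are illustrative assumptions, while `rotten_tomatoes` mirrors the docstring examples.

```py
from datasets import load_dataset

# Splits can be sliced and combined (tensorflow-datasets style split syntax).
train_head = load_dataset("rotten_tomatoes", split="train[:100]")
train_plus_test = load_dataset("rotten_tomatoes", split="train+test")

# Point a packaged builder at a directory via `data_dir`
# (equivalent to passing data_files=os.path.join(data_dir, "**")).
ds = load_dataset("csv", data_dir="path/to/csv/folder", split="train")
```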
datasets
51
src/datasets/load.py
def load_dataset_builder( path: str, name: Optional[str] = None, data_dir: Optional[str] = None, data_files: Optional[Union[str, Sequence[str], Mapping[str, Union[str, Sequence[str]]]]] = None, cache_dir: Optional[str] = None, features: Optional[Features] = None, download_config: Optional[DownloadConfig] = None, download_mode: Optional[Union[DownloadMode, str]] = None, revision: Optional[Union[str, Version]] = None, token: Optional[Union[bool, str]] = None, storage_options: Optional[Dict] = None, trust_remote_code: Optional[bool] = None, _require_default_config_name=True, **config_kwargs, ) -> DatasetBuilder: """Load a dataset builder from the Hugging Face Hub, or a local dataset. A dataset builder can be used to inspect general information that is required to build a dataset (cache directory, config, dataset info, etc.) without downloading the dataset itself. You can find the list of datasets on the [Hub](https://huggingface.co/datasets) or with [`huggingface_hub.list_datasets`]. A dataset is a directory that contains: - some data files in generic formats (JSON, CSV, Parquet, text, etc.) - and optionally a dataset script, if it requires some code to read the data files. This is used to load any kind of formats or structures. Note that dataset scripts can also download and read data files from anywhere - in case your data files already exist online. Args: path (`str`): Path or name of the dataset. Depending on `path`, the dataset builder that is used comes from a generic dataset script (JSON, CSV, Parquet, text etc.) or from the dataset script (a python file) inside the dataset directory. For local datasets: - if `path` is a local directory (containing data files only) -> load a generic dataset builder (csv, json, text etc.) based on the content of the directory e.g. `'./path/to/directory/with/my/csv/data'`. - if `path` is a local dataset script or a directory containing a local dataset script (if the script has the same name as the directory) -> load the dataset builder from the dataset script e.g. `'./dataset/squad'` or `'./dataset/squad/squad.py'`. For datasets on the Hugging Face Hub (list all available datasets with [`huggingface_hub.list_datasets`]) - if `path` is a dataset repository on the HF hub (containing data files only) -> load a generic dataset builder (csv, text etc.) based on the content of the repository e.g. `'username/dataset_name'`, a dataset repository on the HF hub containing your data files. - if `path` is a dataset repository on the HF hub with a dataset script (if the script has the same name as the directory) -> load the dataset builder from the dataset script in the dataset repository e.g. `glue`, `squad`, `'username/dataset_name'`, a dataset repository on the HF hub containing a dataset script `'dataset_name.py'`. name (`str`, *optional*): Defining the name of the dataset configuration. data_dir (`str`, *optional*): Defining the `data_dir` of the dataset configuration. If specified for the generic builders (csv, text etc.) or the Hub datasets and `data_files` is `None`, the behavior is equal to passing `os.path.join(data_dir, **)` as `data_files` to reference all the files in a directory. data_files (`str` or `Sequence` or `Mapping`, *optional*): Path(s) to source data file(s). cache_dir (`str`, *optional*): Directory to read/write data. Defaults to `"~/.cache/huggingface/datasets"`. features ([`Features`], *optional*): Set the features type to use for this dataset. download_config ([`DownloadConfig`], *optional*): Specific download configuration parameters. 
download_mode ([`DownloadMode`] or `str`, defaults to `REUSE_DATASET_IF_EXISTS`): Download/generate mode. revision ([`Version`] or `str`, *optional*): Version of the dataset script to load. As datasets have their own git repository on the Datasets Hub, the default version "main" corresponds to their "main" branch. You can specify a different version than the default "main" by using a commit SHA or a git tag of the dataset repository. token (`str` or `bool`, *optional*): Optional string or boolean to use as Bearer token for remote files on the Datasets Hub. If `True`, or not specified, will get token from `"~/.huggingface"`. storage_options (`dict`, *optional*, defaults to `None`): **Experimental**. Key/value pairs to be passed on to the dataset file-system backend, if any. <Added version="2.11.0"/> trust_remote_code (`bool`, defaults to `False`): Whether or not to allow for datasets defined on the Hub using a dataset script. This option should only be set to `True` for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine. <Added version="2.16.0"/> <Changed version="2.20.0"> `trust_remote_code` defaults to `False` if not specified. </Changed> **config_kwargs (additional keyword arguments): Keyword arguments to be passed to the [`BuilderConfig`] and used in the [`DatasetBuilder`]. Returns: [`DatasetBuilder`] Example: ```py >>> from datasets import load_dataset_builder >>> ds_builder = load_dataset_builder('rotten_tomatoes') >>> ds_builder.info.features {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None), 'text': Value(dtype='string', id=None)} ``` """
/usr/src/app/target_test_cases/failed_tests_load_dataset_builder.txt
def load_dataset_builder( path: str, name: Optional[str] = None, data_dir: Optional[str] = None, data_files: Optional[Union[str, Sequence[str], Mapping[str, Union[str, Sequence[str]]]]] = None, cache_dir: Optional[str] = None, features: Optional[Features] = None, download_config: Optional[DownloadConfig] = None, download_mode: Optional[Union[DownloadMode, str]] = None, revision: Optional[Union[str, Version]] = None, token: Optional[Union[bool, str]] = None, storage_options: Optional[Dict] = None, trust_remote_code: Optional[bool] = None, _require_default_config_name=True, **config_kwargs, ) -> DatasetBuilder: """Load a dataset builder from the Hugging Face Hub, or a local dataset. A dataset builder can be used to inspect general information that is required to build a dataset (cache directory, config, dataset info, etc.) without downloading the dataset itself. You can find the list of datasets on the [Hub](https://huggingface.co/datasets) or with [`huggingface_hub.list_datasets`]. A dataset is a directory that contains: - some data files in generic formats (JSON, CSV, Parquet, text, etc.) - and optionally a dataset script, if it requires some code to read the data files. This is used to load any kind of formats or structures. Note that dataset scripts can also download and read data files from anywhere - in case your data files already exist online. Args: path (`str`): Path or name of the dataset. Depending on `path`, the dataset builder that is used comes from a generic dataset script (JSON, CSV, Parquet, text etc.) or from the dataset script (a python file) inside the dataset directory. For local datasets: - if `path` is a local directory (containing data files only) -> load a generic dataset builder (csv, json, text etc.) based on the content of the directory e.g. `'./path/to/directory/with/my/csv/data'`. - if `path` is a local dataset script or a directory containing a local dataset script (if the script has the same name as the directory) -> load the dataset builder from the dataset script e.g. `'./dataset/squad'` or `'./dataset/squad/squad.py'`. For datasets on the Hugging Face Hub (list all available datasets with [`huggingface_hub.list_datasets`]) - if `path` is a dataset repository on the HF hub (containing data files only) -> load a generic dataset builder (csv, text etc.) based on the content of the repository e.g. `'username/dataset_name'`, a dataset repository on the HF hub containing your data files. - if `path` is a dataset repository on the HF hub with a dataset script (if the script has the same name as the directory) -> load the dataset builder from the dataset script in the dataset repository e.g. `glue`, `squad`, `'username/dataset_name'`, a dataset repository on the HF hub containing a dataset script `'dataset_name.py'`. name (`str`, *optional*): Defining the name of the dataset configuration. data_dir (`str`, *optional*): Defining the `data_dir` of the dataset configuration. If specified for the generic builders (csv, text etc.) or the Hub datasets and `data_files` is `None`, the behavior is equal to passing `os.path.join(data_dir, **)` as `data_files` to reference all the files in a directory. data_files (`str` or `Sequence` or `Mapping`, *optional*): Path(s) to source data file(s). cache_dir (`str`, *optional*): Directory to read/write data. Defaults to `"~/.cache/huggingface/datasets"`. features ([`Features`], *optional*): Set the features type to use for this dataset. download_config ([`DownloadConfig`], *optional*): Specific download configuration parameters. 
download_mode ([`DownloadMode`] or `str`, defaults to `REUSE_DATASET_IF_EXISTS`): Download/generate mode. revision ([`Version`] or `str`, *optional*): Version of the dataset script to load. As datasets have their own git repository on the Datasets Hub, the default version "main" corresponds to their "main" branch. You can specify a different version than the default "main" by using a commit SHA or a git tag of the dataset repository. token (`str` or `bool`, *optional*): Optional string or boolean to use as Bearer token for remote files on the Datasets Hub. If `True`, or not specified, will get token from `"~/.huggingface"`. storage_options (`dict`, *optional*, defaults to `None`): **Experimental**. Key/value pairs to be passed on to the dataset file-system backend, if any. <Added version="2.11.0"/> trust_remote_code (`bool`, defaults to `False`): Whether or not to allow for datasets defined on the Hub using a dataset script. This option should only be set to `True` for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine. <Added version="2.16.0"/> <Changed version="2.20.0"> `trust_remote_code` defaults to `False` if not specified. </Changed> **config_kwargs (additional keyword arguments): Keyword arguments to be passed to the [`BuilderConfig`] and used in the [`DatasetBuilder`]. Returns: [`DatasetBuilder`] Example: ```py >>> from datasets import load_dataset_builder >>> ds_builder = load_dataset_builder('rotten_tomatoes') >>> ds_builder.info.features {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None), 'text': Value(dtype='string', id=None)} ``` """ download_mode = DownloadMode(download_mode or DownloadMode.REUSE_DATASET_IF_EXISTS) if token is not None: download_config = download_config.copy() if download_config else DownloadConfig() download_config.token = token if storage_options is not None: download_config = download_config.copy() if download_config else DownloadConfig() download_config.storage_options.update(storage_options) dataset_module = dataset_module_factory( path, revision=revision, download_config=download_config, download_mode=download_mode, data_dir=data_dir, data_files=data_files, cache_dir=cache_dir, trust_remote_code=trust_remote_code, _require_default_config_name=_require_default_config_name, _require_custom_configs=bool(config_kwargs), ) # Get dataset builder class from the processing script builder_kwargs = dataset_module.builder_kwargs data_dir = builder_kwargs.pop("data_dir", data_dir) data_files = builder_kwargs.pop("data_files", data_files) config_name = builder_kwargs.pop( "config_name", name or dataset_module.builder_configs_parameters.default_config_name ) dataset_name = builder_kwargs.pop("dataset_name", None) info = dataset_module.dataset_infos.get(config_name) if dataset_module.dataset_infos else None if ( path in _PACKAGED_DATASETS_MODULES and data_files is None and dataset_module.builder_configs_parameters.builder_configs[0].data_files is None ): error_msg = f"Please specify the data files or data directory to load for the {path} dataset builder." 
example_extensions = [ extension for extension in _EXTENSION_TO_MODULE if _EXTENSION_TO_MODULE[extension] == path ] if example_extensions: error_msg += f'\nFor example `data_files={{"train": "path/to/data/train/*.{example_extensions[0]}"}}`' raise ValueError(error_msg) builder_cls = get_dataset_builder_class(dataset_module, dataset_name=dataset_name) # Instantiate the dataset builder builder_instance: DatasetBuilder = builder_cls( cache_dir=cache_dir, dataset_name=dataset_name, config_name=config_name, data_dir=data_dir, data_files=data_files, hash=dataset_module.hash, info=info, features=features, token=token, storage_options=storage_options, **builder_kwargs, **config_kwargs, ) builder_instance._use_legacy_cache_dir_if_possible(dataset_module) return builder_instance
load_dataset_builder
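A minimal sketch of the workflow this builder enables: inspect a dataset's metadata without downloading it, then materialize it only if needed. The dataset name follows the docstring example; split metadata is only available when the repository ships it.

```py
from datasets import load_dataset_builder

builder = load_dataset_builder("rotten_tomatoes")
print(builder.info.description)
print(builder.info.features)
print(builder.info.splits)  # populated from the repo's metadata when available

# Only once you decide to materialize the dataset:
builder.download_and_prepare()
ds = builder.as_dataset(split="train")
```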
datasets
52
src/datasets/load.py
def load_from_disk( dataset_path: PathLike, keep_in_memory: Optional[bool] = None, storage_options: Optional[dict] = None ) -> Union[Dataset, DatasetDict]: """ Loads a dataset that was previously saved using [`~Dataset.save_to_disk`] from a dataset directory, or from a filesystem using any implementation of `fsspec.spec.AbstractFileSystem`. Args: dataset_path (`path-like`): Path (e.g. `"dataset/train"`) or remote URI (e.g. `"s3://my-bucket/dataset/train"`) of the [`Dataset`] or [`DatasetDict`] directory where the dataset/dataset-dict will be loaded from. keep_in_memory (`bool`, defaults to `None`): Whether to copy the dataset in-memory. If `None`, the dataset will not be copied in-memory unless explicitly enabled by setting `datasets.config.IN_MEMORY_MAX_SIZE` to nonzero. See more details in the [improve performance](../cache#improve-performance) section. storage_options (`dict`, *optional*): Key/value pairs to be passed on to the file-system backend, if any. <Added version="2.9.0"/> Returns: [`Dataset`] or [`DatasetDict`]: - If `dataset_path` is a path of a dataset directory: the dataset requested. - If `dataset_path` is a path of a dataset dict directory, a [`DatasetDict`] with each split. Example: ```py >>> from datasets import load_from_disk >>> ds = load_from_disk('path/to/dataset/directory') ``` """
/usr/src/app/target_test_cases/failed_tests_load_from_disk.txt
def load_from_disk( dataset_path: PathLike, keep_in_memory: Optional[bool] = None, storage_options: Optional[dict] = None ) -> Union[Dataset, DatasetDict]: """ Loads a dataset that was previously saved using [`~Dataset.save_to_disk`] from a dataset directory, or from a filesystem using any implementation of `fsspec.spec.AbstractFileSystem`. Args: dataset_path (`path-like`): Path (e.g. `"dataset/train"`) or remote URI (e.g. `"s3://my-bucket/dataset/train"`) of the [`Dataset`] or [`DatasetDict`] directory where the dataset/dataset-dict will be loaded from. keep_in_memory (`bool`, defaults to `None`): Whether to copy the dataset in-memory. If `None`, the dataset will not be copied in-memory unless explicitly enabled by setting `datasets.config.IN_MEMORY_MAX_SIZE` to nonzero. See more details in the [improve performance](../cache#improve-performance) section. storage_options (`dict`, *optional*): Key/value pairs to be passed on to the file-system backend, if any. <Added version="2.9.0"/> Returns: [`Dataset`] or [`DatasetDict`]: - If `dataset_path` is a path of a dataset directory: the dataset requested. - If `dataset_path` is a path of a dataset dict directory, a [`DatasetDict`] with each split. Example: ```py >>> from datasets import load_from_disk >>> ds = load_from_disk('path/to/dataset/directory') ``` """ fs: fsspec.AbstractFileSystem fs, *_ = url_to_fs(dataset_path, **(storage_options or {})) if not fs.exists(dataset_path): raise FileNotFoundError(f"Directory {dataset_path} not found") if fs.isfile(posixpath.join(dataset_path, config.DATASET_INFO_FILENAME)) and fs.isfile( posixpath.join(dataset_path, config.DATASET_STATE_JSON_FILENAME) ): return Dataset.load_from_disk(dataset_path, keep_in_memory=keep_in_memory, storage_options=storage_options) elif fs.isfile(posixpath.join(dataset_path, config.DATASETDICT_JSON_FILENAME)): return DatasetDict.load_from_disk(dataset_path, keep_in_memory=keep_in_memory, storage_options=storage_options) else: raise FileNotFoundError( f"Directory {dataset_path} is neither a `Dataset` directory nor a `DatasetDict` directory." )
load_from_disk
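A small round-trip sketch for `load_from_disk`; the local path is made up, and the remote example assumes the matching fsspec filesystem is installed.

```py
from datasets import Dataset, load_from_disk

ds = Dataset.from_dict({"text": ["a", "b", "c"]})
ds.save_to_disk("tmp/my_dataset")            # writes Arrow files plus state/info JSON
reloaded = load_from_disk("tmp/my_dataset")  # detects a Dataset directory and loads it

# The same entry point works for a DatasetDict directory or a remote URI, e.g.
# load_from_disk("s3://my-bucket/dataset/train", storage_options={...}).
```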
datasets
53
src/datasets/arrow_reader.py
def make_file_instructions( name: str, split_infos: List["SplitInfo"], instruction: Union[str, "ReadInstruction"], filetype_suffix: Optional[str] = None, prefix_path: Optional[str] = None, ) -> FileInstructions: """Returns instructions of the split dict. Args: name (`str`): Name of the dataset. split_infos (`list` of `[SplitInfo]`): Dataset splits information. instruction ([`ReadInstruction`] or `str`): Reading instruction for a dataset. filetype_suffix (`str`, *optional*): Suffix of dataset files, e.g. 'arrow' or 'parquet'. prefix_path (`str`, *optional*): Prefix of dataset files, e.g. directory name. Returns: [`FileInstructions`] """
/usr/src/app/target_test_cases/failed_tests_make_file_instructions.txt
def make_file_instructions( name: str, split_infos: List["SplitInfo"], instruction: Union[str, "ReadInstruction"], filetype_suffix: Optional[str] = None, prefix_path: Optional[str] = None, ) -> FileInstructions: """Returns instructions of the split dict. Args: name (`str`): Name of the dataset. split_infos (`list` of `[SplitInfo]`): Dataset splits information. instruction ([`ReadInstruction`] or `str`): Reading instruction for a dataset. filetype_suffix (`str`, *optional*): Suffix of dataset files, e.g. 'arrow' or 'parquet'. prefix_path (`str`, *optional*): Prefix of dataset files, e.g. directory name. Returns: [`FileInstructions`] """ if not isinstance(name, str): raise TypeError(f"Expected str 'name', but got: {type(name).__name__}") elif not name: raise ValueError("Expected non-empty str 'name'") name2len = {info.name: info.num_examples for info in split_infos} name2shard_lengths = {info.name: info.shard_lengths for info in split_infos} name2filenames = { info.name: filenames_for_dataset_split( path=prefix_path, dataset_name=name, split=info.name, filetype_suffix=filetype_suffix, shard_lengths=name2shard_lengths[info.name], ) for info in split_infos } if not isinstance(instruction, ReadInstruction): instruction = ReadInstruction.from_spec(instruction) # Create the absolute instruction (per split) absolute_instructions = instruction.to_absolute(name2len) # For each split, return the files instruction (skip/take) file_instructions = [] num_examples = 0 for abs_instr in absolute_instructions: split_length = name2len[abs_instr.splitname] filenames = name2filenames[abs_instr.splitname] shard_lengths = name2shard_lengths[abs_instr.splitname] from_ = 0 if abs_instr.from_ is None else abs_instr.from_ to = split_length if abs_instr.to is None else abs_instr.to if shard_lengths is None: # not sharded for filename in filenames: take = to - from_ if take == 0: continue num_examples += take file_instructions.append({"filename": filename, "skip": from_, "take": take}) else: # sharded index_start = 0 # Beginning (included) of moving window. index_end = 0 # End (excluded) of moving window. for filename, shard_length in zip(filenames, shard_lengths): index_end += shard_length if from_ < index_end and to > index_start: # There is something to take. skip = from_ - index_start if from_ > index_start else 0 take = to - index_start - skip if to < index_end else -1 if take == 0: continue file_instructions.append({"filename": filename, "skip": skip, "take": take}) num_examples += shard_length - skip if take == -1 else take index_start += shard_length return FileInstructions( num_examples=num_examples, file_instructions=file_instructions, )
make_file_instructions
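`make_file_instructions` is an internal helper; the sketch below only illustrates its inputs and outputs under assumed split metadata. The dataset name, cache directory, and example count are hypothetical.

```py
from datasets.arrow_reader import make_file_instructions
from datasets.splits import SplitInfo

split_infos = [SplitInfo(name="train", num_examples=100)]
instructions = make_file_instructions(
    name="my_dataset",                # hypothetical dataset name
    split_infos=split_infos,
    instruction="train[:10]",         # parsed into a ReadInstruction
    filetype_suffix="arrow",
    prefix_path="/cache/my_dataset",  # hypothetical cache directory
)
print(instructions.num_examples)       # 10
print(instructions.file_instructions)  # e.g. [{"filename": ..., "skip": 0, "take": 10}]
```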
datasets
54
src/datasets/utils/py_utils.py
def map_nested( function: Callable[[Any], Any], data_struct: Any, dict_only: bool = False, map_list: bool = True, map_tuple: bool = False, map_numpy: bool = False, num_proc: Optional[int] = None, parallel_min_length: int = 2, batched: bool = False, batch_size: Optional[int] = 1000, types: Optional[tuple] = None, disable_tqdm: bool = True, desc: Optional[str] = None, ) -> Any: """Apply a function recursively to each element of a nested data struct. Use multiprocessing if num_proc > 1 and the length of data_struct is greater than or equal to `parallel_min_length`. <Changed version="2.5.0"> Before version 2.5.0, multiprocessing was not used if `num_proc` was greater than or equal to ``len(iterable)``. Now, if `num_proc` is greater than or equal to ``len(iterable)``, `num_proc` is set to ``len(iterable)`` and multiprocessing is used. </Changed> Args: function (`Callable`): Function to be applied to `data_struct`. data_struct (`Any`): Data structure to apply `function` to. dict_only (`bool`, default `False`): Whether only apply `function` recursively to `dict` values in `data_struct`. map_list (`bool`, default `True`): Whether also apply `function` recursively to `list` elements (besides `dict` values). map_tuple (`bool`, default `False`): Whether also apply `function` recursively to `tuple` elements (besides `dict` values). map_numpy (`bool, default `False`): Whether also apply `function` recursively to `numpy.array` elements (besides `dict` values). num_proc (`int`, *optional*): Number of processes. The level in the data struct used for multiprocessing is the first level that has smaller sub-structs, starting from the root. parallel_min_length (`int`, default `2`): Minimum length of `data_struct` required for parallel processing. <Added version="2.5.0"/> batched (`bool`, defaults to `False`): Provide batch of items to `function`. <Added version="2.19.0"/> batch_size (`int`, *optional*, defaults to `1000`): Number of items per batch provided to `function` if `batched=True`. If `batch_size <= 0` or `batch_size == None`, provide the full iterable as a single batch to `function`. <Added version="2.19.0"/> types (`tuple`, *optional*): Additional types (besides `dict` values) to apply `function` recursively to their elements. disable_tqdm (`bool`, default `True`): Whether to disable the tqdm progressbar. desc (`str`, *optional*): Prefix for the tqdm progressbar. Returns: `Any` """
/usr/src/app/target_test_cases/failed_tests_map_nested.txt
def map_nested( function: Callable[[Any], Any], data_struct: Any, dict_only: bool = False, map_list: bool = True, map_tuple: bool = False, map_numpy: bool = False, num_proc: Optional[int] = None, parallel_min_length: int = 2, batched: bool = False, batch_size: Optional[int] = 1000, types: Optional[tuple] = None, disable_tqdm: bool = True, desc: Optional[str] = None, ) -> Any: """Apply a function recursively to each element of a nested data struct. Use multiprocessing if num_proc > 1 and the length of data_struct is greater than or equal to `parallel_min_length`. <Changed version="2.5.0"> Before version 2.5.0, multiprocessing was not used if `num_proc` was greater than or equal to ``len(iterable)``. Now, if `num_proc` is greater than or equal to ``len(iterable)``, `num_proc` is set to ``len(iterable)`` and multiprocessing is used. </Changed> Args: function (`Callable`): Function to be applied to `data_struct`. data_struct (`Any`): Data structure to apply `function` to. dict_only (`bool`, default `False`): Whether only apply `function` recursively to `dict` values in `data_struct`. map_list (`bool`, default `True`): Whether also apply `function` recursively to `list` elements (besides `dict` values). map_tuple (`bool`, default `False`): Whether also apply `function` recursively to `tuple` elements (besides `dict` values). map_numpy (`bool, default `False`): Whether also apply `function` recursively to `numpy.array` elements (besides `dict` values). num_proc (`int`, *optional*): Number of processes. The level in the data struct used for multiprocessing is the first level that has smaller sub-structs, starting from the root. parallel_min_length (`int`, default `2`): Minimum length of `data_struct` required for parallel processing. <Added version="2.5.0"/> batched (`bool`, defaults to `False`): Provide batch of items to `function`. <Added version="2.19.0"/> batch_size (`int`, *optional*, defaults to `1000`): Number of items per batch provided to `function` if `batched=True`. If `batch_size <= 0` or `batch_size == None`, provide the full iterable as a single batch to `function`. <Added version="2.19.0"/> types (`tuple`, *optional*): Additional types (besides `dict` values) to apply `function` recursively to their elements. disable_tqdm (`bool`, default `True`): Whether to disable the tqdm progressbar. desc (`str`, *optional*): Prefix for the tqdm progressbar. 
Returns: `Any` """ if types is None: types = [] if not dict_only: if map_list: types.append(list) if map_tuple: types.append(tuple) if map_numpy: types.append(np.ndarray) types = tuple(types) # Singleton if not isinstance(data_struct, dict) and not isinstance(data_struct, types): if batched: data_struct = [data_struct] mapped = function(data_struct) if batched: mapped = mapped[0] return mapped iterable = list(data_struct.values()) if isinstance(data_struct, dict) else data_struct if num_proc is None: num_proc = 1 if any(isinstance(v, types) and len(v) > len(iterable) for v in iterable): mapped = [ map_nested( function=function, data_struct=obj, num_proc=num_proc, parallel_min_length=parallel_min_length, batched=batched, batch_size=batch_size, types=types, ) for obj in iterable ] elif num_proc != -1 and num_proc <= 1 or len(iterable) < parallel_min_length: if batched: if batch_size is None or batch_size <= 0: batch_size = max(len(iterable) // num_proc + int(len(iterable) % num_proc > 0), 1) iterable = list(iter_batched(iterable, batch_size)) mapped = [ _single_map_nested((function, obj, batched, batch_size, types, None, True, None)) for obj in hf_tqdm(iterable, disable=disable_tqdm, desc=desc) ] if batched: mapped = [mapped_item for mapped_batch in mapped for mapped_item in mapped_batch] else: with warnings.catch_warnings(): warnings.filterwarnings( "ignore", message=".* is experimental and might be subject to breaking changes in the future\\.$", category=UserWarning, ) if batched: if batch_size is None or batch_size <= 0: batch_size = len(iterable) // num_proc + int(len(iterable) % num_proc > 0) iterable = list(iter_batched(iterable, batch_size)) mapped = parallel_map( function, iterable, num_proc, batched, batch_size, types, disable_tqdm, desc, _single_map_nested ) if batched: mapped = [mapped_item for mapped_batch in mapped for mapped_item in mapped_batch] if isinstance(data_struct, dict): return dict(zip(data_struct.keys(), mapped)) else: if isinstance(data_struct, list): return mapped elif isinstance(data_struct, tuple): return tuple(mapped) else: return np.array(mapped)
map_nested
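A minimal sketch of `map_nested` on a small nested structure, using the defaults described above plus `map_tuple=True`.

```py
from datasets.utils.py_utils import map_nested

data = {"train": [1, 2, 3], "test": (4, 5)}

# Recurses into dict values and list elements by default; map_tuple=True also
# recurses into tuples, so the function is applied to every leaf value.
print(map_nested(lambda x: x * 10, data, map_tuple=True))
# {'train': [10, 20, 30], 'test': (40, 50)}
```

As the docstring notes, `batched=True` hands chunks of leaves (of size `batch_size`) to the function instead of single items, and `num_proc > 1` enables multiprocessing once the structure is at least `parallel_min_length` long.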
datasets
55
src/datasets/formatting/formatting.py
def query_table( table: Table, key: Union[int, slice, range, str, Iterable], indices: Optional[Table] = None, ) -> pa.Table: """ Query a Table to extract the subtable that correspond to the given key. Args: table (``datasets.table.Table``): The input Table to query from key (``Union[int, slice, range, str, Iterable]``): The key can be of different types: - an integer i: the subtable containing only the i-th row - a slice [i:j:k]: the subtable containing the rows that correspond to this slice - a range(i, j, k): the subtable containing the rows that correspond to this range - a string c: the subtable containing all the rows but only the column c - an iterable l: the subtable that is the concatenation of all the i-th rows for all i in the iterable indices (Optional ``datasets.table.Table``): If not None, it is used to re-map the given key to the table rows. The indices table must contain one column named "indices" of type uint64. This is used in case of shuffling or rows selection. Returns: ``pyarrow.Table``: the result of the query on the input table """
/usr/src/app/target_test_cases/failed_tests_query_table.txt
def query_table(
    table: Table,
    key: Union[int, slice, range, str, Iterable],
    indices: Optional[Table] = None,
) -> pa.Table:
    """
    Query a Table to extract the subtable that corresponds to the given key.

    Args:
        table (``datasets.table.Table``): The input Table to query from
        key (``Union[int, slice, range, str, Iterable]``): The key can be of different types:
            - an integer i: the subtable containing only the i-th row
            - a slice [i:j:k]: the subtable containing the rows that correspond to this slice
            - a range(i, j, k): the subtable containing the rows that correspond to this range
            - a string c: the subtable containing all the rows but only the column c
            - an iterable l: the subtable that is the concatenation of all the i-th rows for all i in the iterable
        indices (Optional ``datasets.table.Table``): If not None, it is used to re-map the given key to the table rows.
            The indices table must contain one column named "indices" of type uint64.
            This is used in case of shuffling or rows selection.

    Returns:
        ``pyarrow.Table``: the result of the query on the input table
    """
    # Check if key is valid
    if not isinstance(key, (int, slice, range, str, Iterable)):
        try:
            key = operator.index(key)
        except TypeError:
            _raise_bad_key_type(key)
    if isinstance(key, str):
        _check_valid_column_key(key, table.column_names)
    else:
        size = indices.num_rows if indices is not None else table.num_rows
        _check_valid_index_key(key, size)
    # Query the main table
    if indices is None:
        pa_subtable = _query_table(table, key)
    else:
        pa_subtable = _query_table_with_indices_mapping(table, key, indices=indices)
    return pa_subtable
query_table
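A short sketch of the key types `query_table` accepts, built on an in-memory table; the column names and values are illustrative.

```py
from datasets.table import InMemoryTable
from datasets.formatting.formatting import query_table

table = InMemoryTable.from_pydict({"text": ["a", "b", "c"], "label": [0, 1, 0]})

print(query_table(table, 0).to_pydict())            # {'text': ['a'], 'label': [0]}
print(query_table(table, slice(0, 2)).to_pydict())  # first two rows
print(query_table(table, "text").to_pydict())       # all rows, only the "text" column
print(query_table(table, [2, 0]).to_pydict())       # rows 2 and 0, in that order
```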
datasets
56
src/datasets/distributed.py
def split_dataset_by_node(dataset: DatasetType, rank: int, world_size: int) -> DatasetType: """ Split a dataset for the node at rank `rank` in a pool of nodes of size `world_size`. For map-style datasets: Each node is assigned a chunk of data, e.g. rank 0 is given the first chunk of the dataset. To maximize data loading throughput, chunks are made of contiguous data on disk if possible. For iterable datasets: If the dataset has a number of shards that is a factor of `world_size` (i.e. if `dataset.n_shards % world_size == 0`), then the shards are evenly assigned across the nodes, which is the most optimized. Otherwise, each node keeps 1 example out of `world_size`, skipping the other examples. Args: dataset ([`Dataset`] or [`IterableDataset`]): The dataset to split by node. rank (`int`): Rank of the current node. world_size (`int`): Total number of nodes. Returns: [`Dataset`] or [`IterableDataset`]: The dataset to be used on the node at rank `rank`. """
/usr/src/app/target_test_cases/failed_tests_split_dataset_by_node.txt
def split_dataset_by_node(dataset: DatasetType, rank: int, world_size: int) -> DatasetType:
    """
    Split a dataset for the node at rank `rank` in a pool of nodes of size `world_size`.

    For map-style datasets:

    Each node is assigned a chunk of data, e.g. rank 0 is given the first chunk of the dataset.
    To maximize data loading throughput, chunks are made of contiguous data on disk if possible.

    For iterable datasets:

    If the dataset has a number of shards that is a factor of `world_size` (i.e. if `dataset.n_shards % world_size == 0`),
    then the shards are evenly assigned across the nodes, which is the most optimized.
    Otherwise, each node keeps 1 example out of `world_size`, skipping the other examples.

    Args:
        dataset ([`Dataset`] or [`IterableDataset`]):
            The dataset to split by node.
        rank (`int`):
            Rank of the current node.
        world_size (`int`):
            Total number of nodes.

    Returns:
        [`Dataset`] or [`IterableDataset`]: The dataset to be used on the node at rank `rank`.
    """
    if isinstance(dataset, Dataset):
        return _split_by_node_map_style_dataset(dataset, rank=rank, world_size=world_size)
    else:
        return _split_by_node_iterable_dataset(dataset, rank=rank, world_size=world_size)
split_dataset_by_node
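A usage sketch for `split_dataset_by_node` covering both the map-style and the iterable case; the toy dataset and shard count are illustrative.

```py
from datasets import Dataset
from datasets.distributed import split_dataset_by_node

full = Dataset.from_dict({"x": list(range(10))})

# Map-style: each node gets a contiguous chunk of the dataset.
shard_rank0 = split_dataset_by_node(full, rank=0, world_size=4)
shard_rank1 = split_dataset_by_node(full, rank=1, world_size=4)
print(len(shard_rank0), len(shard_rank1))  # 3 and 3 (chunk sizes differ by at most 1)

# Iterable: shards are distributed across nodes, or examples are interleaved
# when n_shards % world_size != 0.
streamed = full.to_iterable_dataset(num_shards=4)
per_node = split_dataset_by_node(streamed, rank=0, world_size=4)
```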
datasets
57
src/datasets/table.py
def table_cast(table: pa.Table, schema: pa.Schema): """Improved version of `pa.Table.cast`. It supports casting to feature types stored in the schema metadata. Args: table (`pyarrow.Table`): PyArrow table to cast. schema (`pyarrow.Schema`): Target PyArrow schema. Returns: table (`pyarrow.Table`): the casted table """
/usr/src/app/target_test_cases/failed_tests_table_cast.txt
def table_cast(table: pa.Table, schema: pa.Schema):
    """Improved version of `pa.Table.cast`.

    It supports casting to feature types stored in the schema metadata.

    Args:
        table (`pyarrow.Table`):
            PyArrow table to cast.
        schema (`pyarrow.Schema`):
            Target PyArrow schema.

    Returns:
        table (`pyarrow.Table`): the casted table
    """
    if table.schema != schema:
        return cast_table_to_schema(table, schema)
    elif table.schema.metadata != schema.metadata:
        return table.replace_schema_metadata(schema.metadata)
    else:
        return table
table_cast
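A small sketch showing what "casting to feature types stored in the schema metadata" looks like in practice; the column name and target type are illustrative.

```py
import pyarrow as pa
from datasets import Features, Value
from datasets.table import table_cast

table = pa.table({"id": pa.array([1, 2, 3], type=pa.int64())})

# Build a target schema carrying the datasets feature metadata, then cast.
features = Features({"id": Value("int32")})
casted = table_cast(table, features.arrow_schema)
print(casted.schema)  # id: int32, plus the huggingface feature metadata
```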
datasets
58
src/datasets/utils/file_utils.py
def xjoin(a, *p): """ This function extends os.path.join to support the "::" hop separator. It supports both paths and urls. A shorthand, particularly useful where you have multiple hops, is to “chain” the URLs with the special separator "::". This is used to access files inside a zip file over http for example. Let's say you have a zip file at https://host.com/archive.zip, and you want to access the file inside the zip file at /folder1/file.txt. Then you can just chain the url this way: zip://folder1/file.txt::https://host.com/archive.zip The xjoin function allows you to apply the join on the first path of the chain. Example:: >>> xjoin("zip://folder1::https://host.com/archive.zip", "file.txt") zip://folder1/file.txt::https://host.com/archive.zip """
/usr/src/app/target_test_cases/failed_tests_xjoin.txt
def xjoin(a, *p):
    """
    This function extends os.path.join to support the "::" hop separator. It supports both paths and urls.

    A shorthand, particularly useful where you have multiple hops, is to “chain” the URLs with the special separator "::".
    This is used to access files inside a zip file over http for example.

    Let's say you have a zip file at https://host.com/archive.zip, and you want to access the file inside the zip file at /folder1/file.txt.
    Then you can just chain the url this way:

        zip://folder1/file.txt::https://host.com/archive.zip

    The xjoin function allows you to apply the join on the first path of the chain.

    Example::

        >>> xjoin("zip://folder1::https://host.com/archive.zip", "file.txt")
        zip://folder1/file.txt::https://host.com/archive.zip
    """
    a, *b = str(a).split("::")
    if is_local_path(a):
        return os.path.join(a, *p)
    else:
        a = posixpath.join(a, *p)
        return "::".join([a] + b)
xjoin
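Two quick calls contrasting the local and the chained-URL behaviour of `xjoin`; the local path is made up and assumes a POSIX system.

```py
from datasets.utils.file_utils import xjoin

# Local paths behave like os.path.join.
xjoin("/data/files", "train.csv")
# "/data/files/train.csv"

# For chained URLs, only the first hop is joined; the remaining hops are kept verbatim.
xjoin("zip://folder1::https://host.com/archive.zip", "file.txt")
# "zip://folder1/file.txt::https://host.com/archive.zip"
```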
pylint
0
pylint/lint/pylinter.py
def load_plugin_configuration(self) -> None: """Call the configuration hook for plugins. This walks through the list of plugins, grabs the "load_configuration" hook, if exposed, and calls it to allow plugins to configure specific settings. The result of attempting to load the plugin of the given name is stored in the dynamic plugins dictionary in ``load_plugin_modules`` above. ..note:: This function previously always tried to load modules again, which led to some confusion and silent failure conditions as described in GitHub issue #7264. Making it use the stored result is more efficient, and means that we avoid the ``init-hook`` problems from before. """
/usr/src/app/target_test_cases/failed_tests_PyLinter.load_plugin_configuration.txt
def load_plugin_configuration(self) -> None: """Call the configuration hook for plugins. This walks through the list of plugins, grabs the "load_configuration" hook, if exposed, and calls it to allow plugins to configure specific settings. The result of attempting to load the plugin of the given name is stored in the dynamic plugins dictionary in ``load_plugin_modules`` above. ..note:: This function previously always tried to load modules again, which led to some confusion and silent failure conditions as described in GitHub issue #7264. Making it use the stored result is more efficient, and means that we avoid the ``init-hook`` problems from before. """ for modname, module_or_error in self._dynamic_plugins.items(): if isinstance(module_or_error, ModuleNotFoundError): self.add_message( "bad-plugin-value", args=(modname, module_or_error), line=0 ) elif hasattr(module_or_error, "load_configuration"): module_or_error.load_configuration(self) # We re-set all the dictionary values to True here to make sure the dict # is pickle-able. This is only a problem in multiprocessing/parallel mode. # (e.g. invoking pylint -j 2) self._dynamic_plugins = { modname: not isinstance(val, ModuleNotFoundError) for modname, val in self._dynamic_plugins.items() }
PyLinter.load_plugin_configuration
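As context for the hook described above, a hedged sketch of the plugin side: a module enabled via `load-plugins` must expose `register` and may expose `load_configuration`, which `load_plugin_configuration` calls once options are parsed. The module name and the tweaked option are illustrative only.

```py
# my_pylint_plugin.py -- hypothetical module enabled via `load-plugins=my_pylint_plugin`
from pylint.lint import PyLinter


def register(linter: PyLinter) -> None:
    """Required plugin entry point (register custom checkers/reporters here)."""


def load_configuration(linter: PyLinter) -> None:
    """Optional hook invoked by PyLinter.load_plugin_configuration()."""
    # Illustrative tweak: accept an extra short name once configuration is final.
    linter.config.good_names = list(linter.config.good_names) + ["df"]
```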
pylint
1
pylint/extensions/for_any_all.py
def _assigned_reassigned_returned( node: nodes.For, if_children: list[nodes.NodeNG], node_after_loop: nodes.NodeNG ) -> bool: """Detect boolean-assign, for-loop, re-assign, return pattern: Ex: def check_lines(lines, max_chars): long_line = False for line in lines: if len(line) > max_chars: long_line = True # no elif / else statement return long_line """
/usr/src/app/target_test_cases/failed_tests__assigned_reassigned_returned.txt
def _assigned_reassigned_returned( node: nodes.For, if_children: list[nodes.NodeNG], node_after_loop: nodes.NodeNG ) -> bool: """Detect boolean-assign, for-loop, re-assign, return pattern: Ex: def check_lines(lines, max_chars): long_line = False for line in lines: if len(line) > max_chars: long_line = True # no elif / else statement return long_line """ node_before_loop = node.previous_sibling() if not assigned_bool(node_before_loop): # node before loop isn't assigning to boolean return False assign_children = [x for x in if_children if isinstance(x, nodes.Assign)] if not assign_children: # if-nodes inside loop aren't assignments return False # We only care for the first assign node of the if-children. Otherwise it breaks the pattern. first_target = assign_children[0].targets[0] target_before_loop = node_before_loop.targets[0] if not ( isinstance(first_target, nodes.AssignName) and isinstance(target_before_loop, nodes.AssignName) ): return False node_before_loop_name = node_before_loop.targets[0].name return ( first_target.name == node_before_loop_name and isinstance(node_after_loop, nodes.Return) and isinstance(node_after_loop.value, nodes.Name) and node_after_loop.value.name == node_before_loop_name )
_assigned_reassigned_returned
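To make the detected pattern concrete, here is the flagged shape from the docstring next to the `any()` rewrite this extension (consider-using-any-or-all) nudges you toward; the function names are illustrative.

```py
# Flagged: boolean assigned, re-assigned inside the loop's if, then returned.
def check_lines(lines, max_chars):
    long_line = False
    for line in lines:
        if len(line) > max_chars:
            long_line = True
    return long_line


# Equivalent rewrite using a generator expression with any().
def check_lines_any(lines, max_chars):
    return any(len(line) > max_chars for line in lines)
```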
pylint
2
pylint/extensions/code_style.py
def _check_consider_using_assignment_expr(self, node: nodes.If) -> None: """Check if an assignment expression (walrus operator) can be used. For example if an assignment is directly followed by an if statement: >>> x = 2 >>> if x: >>> ... Can be replaced by: >>> if (x := 2): >>> ... Note: Assignment expressions were added in Python 3.8 """
/usr/src/app/target_test_cases/failed_tests__check_consider_using_assignment_expr.txt
def _check_consider_using_assignment_expr(self, node: nodes.If) -> None: """Check if an assignment expression (walrus operator) can be used. For example if an assignment is directly followed by an if statement: >>> x = 2 >>> if x: >>> ... Can be replaced by: >>> if (x := 2): >>> ... Note: Assignment expressions were added in Python 3.8 """ # Check if `node.test` contains a `Name` node node_name: nodes.Name | None = None if isinstance(node.test, nodes.Name): node_name = node.test elif ( isinstance(node.test, nodes.UnaryOp) and node.test.op == "not" and isinstance(node.test.operand, nodes.Name) ): node_name = node.test.operand elif ( isinstance(node.test, nodes.Compare) and isinstance(node.test.left, nodes.Name) and len(node.test.ops) == 1 ): node_name = node.test.left else: return # Make sure the previous node is an assignment to the same name # used in `node.test`. Furthermore, ignore if assignment spans multiple lines. prev_sibling = node.previous_sibling() if CodeStyleChecker._check_prev_sibling_to_if_stmt( prev_sibling, node_name.name ): # Check if match statement would be a better fit. # I.e. multiple ifs that test the same name. if CodeStyleChecker._check_ignore_assignment_expr_suggestion( node, node_name.name ): return # Build suggestion string. Check length of suggestion # does not exceed max-line-length-suggestions test_str = node.test.as_string().replace( node_name.name, f"({node_name.name} := {prev_sibling.value.as_string()})", 1, ) suggestion = f"if {test_str}:" if ( node.col_offset is not None and len(suggestion) + node.col_offset > self._max_length or len(suggestion) > self._max_length ): return self.add_message( "consider-using-assignment-expr", node=node_name, args=(suggestion,), )
_check_consider_using_assignment_expr
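A before/after sketch of the pattern this check targets; the regex and test string are illustrative.

```py
import re

# Triggers the suggestion: a single-line assignment immediately tested by `if`.
match = re.match(r"\d+", "123abc")
if match:
    print(match.group())

# Suggested form with an assignment expression (Python 3.8+).
if (match := re.match(r"\d+", "123abc")):
    print(match.group())
```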
pylint
3
pylint/lint/pylinter.py
def _check_file( self, get_ast: GetAstProtocol, check_astroid_module: Callable[[nodes.Module], bool | None], file: FileItem, ) -> None: """Check a file using the passed utility functions (get_ast and check_astroid_module). :param callable get_ast: callable returning AST from defined file taking the following arguments - filepath: path to the file to check - name: Python module name :param callable check_astroid_module: callable checking an AST taking the following arguments - ast: AST of the module :param FileItem file: data about the file :raises AstroidError: for any failures stemming from astroid """
/usr/src/app/target_test_cases/failed_tests__check_file.txt
def _check_file( self, get_ast: GetAstProtocol, check_astroid_module: Callable[[nodes.Module], bool | None], file: FileItem, ) -> None: """Check a file using the passed utility functions (get_ast and check_astroid_module). :param callable get_ast: callable returning AST from defined file taking the following arguments - filepath: path to the file to check - name: Python module name :param callable check_astroid_module: callable checking an AST taking the following arguments - ast: AST of the module :param FileItem file: data about the file :raises AstroidError: for any failures stemming from astroid """ self.set_current_module(file.name, file.filepath) # get the module representation ast_node = get_ast(file.filepath, file.name) if ast_node is None: return self._ignore_file = False self.file_state = FileState(file.modpath, self.msgs_store, ast_node) # fix the current file (if the source file was not available or # if it's actually a c extension) self.current_file = ast_node.file try: check_astroid_module(ast_node) except Exception as e: # pragma: no cover raise astroid.AstroidError from e # warn about spurious inline messages handling spurious_messages = self.file_state.iter_spurious_suppression_messages( self.msgs_store ) for msgid, line, args in spurious_messages: self.add_message(msgid, line, None, args)
_check_file
pylint
4
pylint/checkers/variables.py
def _check_loop_finishes_via_except( node: nodes.NodeNG, other_node_try_except: nodes.Try, ) -> bool: """Check for a specific control flow scenario. Described in https://github.com/pylint-dev/pylint/issues/5683. A scenario where the only non-break exit from a loop consists of the very except handler we are examining, such that code in the `else` branch of the loop can depend on it being assigned. Example: for _ in range(3): try: do_something() except: name = 1 <-- only non-break exit from loop else: break else: print(name) """
/usr/src/app/target_test_cases/failed_tests__check_loop_finishes_via_except.txt
def _check_loop_finishes_via_except( node: nodes.NodeNG, other_node_try_except: nodes.Try, ) -> bool: """Check for a specific control flow scenario. Described in https://github.com/pylint-dev/pylint/issues/5683. A scenario where the only non-break exit from a loop consists of the very except handler we are examining, such that code in the `else` branch of the loop can depend on it being assigned. Example: for _ in range(3): try: do_something() except: name = 1 <-- only non-break exit from loop else: break else: print(name) """ if not other_node_try_except.orelse: return False closest_loop: None | (nodes.For | nodes.While) = ( utils.get_node_first_ancestor_of_type(node, (nodes.For, nodes.While)) ) if closest_loop is None: return False if not any( else_statement is node or else_statement.parent_of(node) for else_statement in closest_loop.orelse ): # `node` not guarded by `else` return False for inner_else_statement in other_node_try_except.orelse: if isinstance(inner_else_statement, nodes.Break): break_stmt = inner_else_statement break else: # No break statement return False def _try_in_loop_body( other_node_try_except: nodes.Try, loop: nodes.For | nodes.While, ) -> bool: """Return True if `other_node_try_except` is a descendant of `loop`.""" return any( loop_body_statement is other_node_try_except or loop_body_statement.parent_of(other_node_try_except) for loop_body_statement in loop.body ) if not _try_in_loop_body(other_node_try_except, closest_loop): for ancestor in closest_loop.node_ancestors(): if isinstance(ancestor, (nodes.For, nodes.While)): if _try_in_loop_body(other_node_try_except, ancestor): break else: # `other_node_try_except` didn't have a shared ancestor loop return False for loop_stmt in closest_loop.body: if NamesConsumer._recursive_search_for_continue_before_break( loop_stmt, break_stmt ): break else: # No continue found, so we arrived at our special case! return True return False
_check_loop_finishes_via_except
pylint
5
pylint/checkers/classes/class_checker.py
def _check_protected_attribute_access( self, node: nodes.Attribute | nodes.AssignAttr ) -> None: """Given an attribute access node (set or get), check if attribute access is legitimate. Call _check_first_attr with node before calling this method. Valid cases are: * self._attr in a method or cls._attr in a classmethod. Checked by _check_first_attr. * Klass._attr inside "Klass" class. * Klass2._attr inside "Klass" class when Klass2 is a base class of Klass. """
/usr/src/app/target_test_cases/failed_tests__check_protected_attribute_access.txt
def _check_protected_attribute_access( self, node: nodes.Attribute | nodes.AssignAttr ) -> None: """Given an attribute access node (set or get), check if attribute access is legitimate. Call _check_first_attr with node before calling this method. Valid cases are: * self._attr in a method or cls._attr in a classmethod. Checked by _check_first_attr. * Klass._attr inside "Klass" class. * Klass2._attr inside "Klass" class when Klass2 is a base class of Klass. """ attrname = node.attrname if ( not is_attr_protected(attrname) or attrname in self.linter.config.exclude_protected ): return # Typing annotations in function definitions can include protected members if utils.is_node_in_type_annotation_context(node): return # Return if `attrname` is defined at the module-level or as a class attribute # and is listed in `exclude-protected`. inferred = safe_infer(node.expr) if ( inferred and isinstance(inferred, (nodes.ClassDef, nodes.Module)) and f"{inferred.name}.{attrname}" in self.linter.config.exclude_protected ): return klass = node_frame_class(node) if klass is None: # We are not in a class, no remaining valid case self.add_message("protected-access", node=node, args=attrname) return # In classes, check we are not getting a parent method # through the class object or through super # If the expression begins with a call to super, that's ok. if ( isinstance(node.expr, nodes.Call) and isinstance(node.expr.func, nodes.Name) and node.expr.func.name == "super" ): return # If the expression begins with a call to type(self), that's ok. if self._is_type_self_call(node.expr): return # Check if we are inside the scope of a class or nested inner class inside_klass = True outer_klass = klass callee = node.expr.as_string() parents_callee = callee.split(".") parents_callee.reverse() for callee in parents_callee: if not outer_klass or callee != outer_klass.name: inside_klass = False break # Move up one level within the nested classes outer_klass = get_outer_class(outer_klass) # We are in a class, one remaining valid cases, Klass._attr inside # Klass if not (inside_klass or callee in klass.basenames): # Detect property assignments in the body of the class. # This is acceptable: # # class A: # b = property(lambda: self._b) stmt = node.parent.statement() if ( isinstance(stmt, nodes.Assign) and len(stmt.targets) == 1 and isinstance(stmt.targets[0], nodes.AssignName) ): name = stmt.targets[0].name if _is_attribute_property(name, klass): return if ( self._is_classmethod(node.frame()) and self._is_inferred_instance(node.expr, klass) and self._is_class_or_instance_attribute(attrname, klass) ): return licit_protected_member = not attrname.startswith("__") if ( not self.linter.config.check_protected_access_in_special_methods and licit_protected_member and self._is_called_inside_special_method(node) ): return self.add_message("protected-access", node=node, args=attrname)
_check_protected_attribute_access
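A small illustration of the valid cases listed in the docstring versus an access that would be flagged; the class and attribute names are made up.

```py
class Account:
    _default_currency = "EUR"             # class attribute, protected by convention

    def __init__(self, balance):
        self._balance = balance

    def deposit(self, amount):
        self._balance += amount           # ok: self._attr inside a method

    def currency(self):
        return Account._default_currency  # ok: Klass._attr inside Klass


def audit(account: Account):
    return account._balance               # flagged: protected-access (W0212)
```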
pylint
6
pylint/checkers/variables.py
def _detect_global_scope( node: nodes.Name, frame: nodes.LocalsDictNodeNG, defframe: nodes.LocalsDictNodeNG, ) -> bool: """Detect that the given frames share a global scope. Two frames share a global scope when neither of them are hidden under a function scope, as well as any parent scope of them, until the root scope. In this case, depending from something defined later on will only work if guarded by a nested function definition. Example: class A: # B has the same global scope as `C`, leading to a NameError. # Return True to indicate a shared scope. class B(C): ... class C: ... Whereas this does not lead to a NameError: class A: def guard(): # Return False to indicate no scope sharing. class B(C): ... class C: ... """
/usr/src/app/target_test_cases/failed_tests__detect_global_scope.txt
def _detect_global_scope( node: nodes.Name, frame: nodes.LocalsDictNodeNG, defframe: nodes.LocalsDictNodeNG, ) -> bool: """Detect that the given frames share a global scope. Two frames share a global scope when neither of them are hidden under a function scope, as well as any parent scope of them, until the root scope. In this case, depending from something defined later on will only work if guarded by a nested function definition. Example: class A: # B has the same global scope as `C`, leading to a NameError. # Return True to indicate a shared scope. class B(C): ... class C: ... Whereas this does not lead to a NameError: class A: def guard(): # Return False to indicate no scope sharing. class B(C): ... class C: ... """ def_scope = scope = None if frame and frame.parent: scope = frame.parent.scope() if defframe and defframe.parent: def_scope = defframe.parent.scope() if ( isinstance(frame, nodes.ClassDef) and scope is not def_scope and scope is utils.get_node_first_ancestor_of_type(node, nodes.FunctionDef) ): # If the current node's scope is a class nested under a function, # and the def_scope is something else, then they aren't shared. return False if isinstance(frame, nodes.FunctionDef): # If the parent of the current node is a # function, then it can be under its scope (defined in); or # the `->` part of annotations. The same goes # for annotations of function arguments, they'll have # their parent the Arguments node. if frame.parent_of(defframe): return node.lineno < defframe.lineno # type: ignore[no-any-return] if not isinstance(node.parent, (nodes.FunctionDef, nodes.Arguments)): return False break_scopes = [] for current_scope in (scope or frame, def_scope): # Look for parent scopes. If there is anything different # than a module or a class scope, then the frames don't # share a global scope. parent_scope = current_scope while parent_scope: if not isinstance(parent_scope, (nodes.ClassDef, nodes.Module)): break_scopes.append(parent_scope) break if parent_scope.parent: parent_scope = parent_scope.parent.scope() else: break if len(set(break_scopes)) > 1: # Store different scopes than expected. # If the stored scopes are, in fact, the very same, then it means # that the two frames (frame and defframe) share the same scope, # and we could apply our lineno analysis over them. # For instance, this works when they are inside a function, the node # that uses a definition and the definition itself. return False # At this point, we are certain that frame and defframe share a scope # and the definition of the first depends on the second. return frame.lineno < defframe.lineno # type: ignore[no-any-return]
_detect_global_scope
pylint
7
pylint/checkers/unicode.py
def _determine_codec(stream: io.BytesIO) -> tuple[str, int]: """Determine the codec from the given stream. first tries https://www.python.org/dev/peps/pep-0263/ and if this fails also checks for BOMs of UTF-16 and UTF-32 to be future-proof. Args: stream: The byte stream to analyse Returns: A tuple consisting of: - normalized codec name - the line in which the codec was found Raises: SyntaxError: if failing to detect codec """
/usr/src/app/target_test_cases/failed_tests__determine_codec.txt
def _determine_codec(stream: io.BytesIO) -> tuple[str, int]: """Determine the codec from the given stream. first tries https://www.python.org/dev/peps/pep-0263/ and if this fails also checks for BOMs of UTF-16 and UTF-32 to be future-proof. Args: stream: The byte stream to analyse Returns: A tuple consisting of: - normalized codec name - the line in which the codec was found Raises: SyntaxError: if failing to detect codec """ try: # First try to detect encoding with PEP 263 # Doesn't work with UTF-16/32 at the time of writing # see https://bugs.python.org/issue1503789 codec, lines = detect_encoding(stream.readline) # lines are empty if UTF-8 BOM is found codec_definition_line = len(lines) or 1 except SyntaxError as e: # Codec could not be detected by Python, we try manually to check for # UTF 16/32 BOMs, which aren't supported by Python at the time of writing. # This is only included to be future save and handle these codecs as well stream.seek(0) try: codec = extract_codec_from_bom(stream.readline()) codec_definition_line = 1 except ValueError as ve: # Failed to detect codec, so the syntax error originated not from # UTF16/32 codec usage. So simply raise the error again. raise e from ve return _normalize_codec_name(codec), codec_definition_line
_determine_codec
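The PEP 263 step above delegates to the standard library's `tokenize.detect_encoding`; the sketch below shows the behaviours the function relies on, using the stdlib only (no pylint imports):

```py
import io
from tokenize import detect_encoding

# A coding cookie on line 1 is detected and that line is reported as consumed.
stream = io.BytesIO(b"# -*- coding: ascii -*-\nx = 1\n")
codec, consumed = detect_encoding(stream.readline)
print(codec, len(consumed))                                    # ascii 1

# A bare UTF-8 BOM yields no consumed lines, hence the `len(lines) or 1` fallback.
print(detect_encoding(io.BytesIO(b"\xef\xbb\xbf").readline))   # ('utf-8-sig', [])

# UTF-16 input makes detect_encoding raise, which is why _determine_codec then
# falls back to extract_codec_from_bom.
try:
    detect_encoding(io.BytesIO("x = 1\n".encode("utf-16")).readline)
except SyntaxError as exc:
    print("fallback needed:", exc)
```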
pylint
8
pylint/checkers/typecheck.py
def _emit_no_member( node: nodes.Attribute | nodes.AssignAttr | nodes.DelAttr, owner: InferenceResult, owner_name: str | None, mixin_class_rgx: Pattern[str], ignored_mixins: bool = True, ignored_none: bool = True, ) -> bool: """Try to see if no-member should be emitted for the given owner. The following cases are ignored: * the owner is a function and it has decorators. * the owner is an instance and it has __getattr__, __getattribute__ implemented * the module is explicitly ignored from no-member checks * the owner is a class and the name can be found in its metaclass. * The access node is protected by an except handler, which handles AttributeError, Exception or bare except. * The node is guarded behind and `IF` or `IFExp` node """
/usr/src/app/target_test_cases/failed_tests__emit_no_member.txt
def _emit_no_member( node: nodes.Attribute | nodes.AssignAttr | nodes.DelAttr, owner: InferenceResult, owner_name: str | None, mixin_class_rgx: Pattern[str], ignored_mixins: bool = True, ignored_none: bool = True, ) -> bool: """Try to see if no-member should be emitted for the given owner. The following cases are ignored: * the owner is a function and it has decorators. * the owner is an instance and it has __getattr__, __getattribute__ implemented * the module is explicitly ignored from no-member checks * the owner is a class and the name can be found in its metaclass. * The access node is protected by an except handler, which handles AttributeError, Exception or bare except. * The node is guarded behind and `IF` or `IFExp` node """ # pylint: disable = too-many-return-statements, too-many-branches if node_ignores_exception(node, AttributeError): return False if ignored_none and isinstance(owner, nodes.Const) and owner.value is None: return False if is_super(owner) or getattr(owner, "type", None) == "metaclass": return False if owner_name and ignored_mixins and mixin_class_rgx.match(owner_name): return False if isinstance(owner, nodes.FunctionDef) and ( owner.decorators or owner.is_abstract() ): return False if isinstance(owner, (astroid.Instance, nodes.ClassDef)): # Issue #2565: Don't ignore enums, as they have a `__getattr__` but it's not # invoked at this point. try: metaclass = owner.metaclass() except astroid.MroError: pass else: # Renamed in Python 3.10 to `EnumType` if metaclass and metaclass.qname() in {"enum.EnumMeta", "enum.EnumType"}: return not _enum_has_attribute(owner, node) if owner.has_dynamic_getattr(): return False if not has_known_bases(owner): return False # Exclude typed annotations, since these might actually exist # at some point during the runtime of the program. if utils.is_attribute_typed_annotation(owner, node.attrname): return False if isinstance(owner, astroid.objects.Super): # Verify if we are dealing with an invalid Super object. # If it is invalid, then there's no point in checking that # it has the required attribute. Also, don't fail if the # MRO is invalid. try: owner.super_mro() except (astroid.MroError, astroid.SuperError): return False if not all(has_known_bases(base) for base in owner.type.mro()): return False if isinstance(owner, nodes.Module): try: owner.getattr("__getattr__") return False except astroid.NotFoundError: pass if owner_name and node.attrname.startswith("_" + owner_name): # Test if an attribute has been mangled ('private' attribute) unmangled_name = node.attrname.split("_" + owner_name)[-1] try: if owner.getattr(unmangled_name, context=None) is not None: return False except astroid.NotFoundError: return True # Don't emit no-member if guarded behind `IF` or `IFExp` # * Walk up recursively until if statement is found. # * Check if condition can be inferred as `Const`, # would evaluate as `False`, # and whether the node is part of the `body`. # * Continue checking until scope of node is reached. scope: nodes.NodeNG = node.scope() node_origin: nodes.NodeNG = node parent: nodes.NodeNG = node.parent while parent != scope: if isinstance(parent, (nodes.If, nodes.IfExp)): inferred = safe_infer(parent.test) if ( # pylint: disable=too-many-boolean-expressions isinstance(inferred, nodes.Const) and inferred.bool_value() is False and ( isinstance(parent, nodes.If) and node_origin in parent.body or isinstance(parent, nodes.IfExp) and node_origin == parent.body ) ): return False node_origin, parent = parent, parent.parent return True
_emit_no_member
pylint
9
pylint/checkers/symilar.py
def _find_common( self, lineset1: LineSet, lineset2: LineSet ) -> Generator[Commonality]: """Find similarities in the two given linesets. This the core of the algorithm. The idea is to compute the hashes of a minimal number of successive lines of each lineset and then compare the hashes. Every match of such comparison is stored in a dict that links the couple of starting indices in both linesets to the couple of corresponding starting and ending lines in both files. Last regroups all successive couples in a bigger one. It allows to take into account common chunk of lines that have more than the minimal number of successive lines required. """
/usr/src/app/target_test_cases/failed_tests__find_common.txt
def _find_common( self, lineset1: LineSet, lineset2: LineSet ) -> Generator[Commonality]: """Find similarities in the two given linesets. This the core of the algorithm. The idea is to compute the hashes of a minimal number of successive lines of each lineset and then compare the hashes. Every match of such comparison is stored in a dict that links the couple of starting indices in both linesets to the couple of corresponding starting and ending lines in both files. Last regroups all successive couples in a bigger one. It allows to take into account common chunk of lines that have more than the minimal number of successive lines required. """ hash_to_index_1: HashToIndex_T hash_to_index_2: HashToIndex_T index_to_lines_1: IndexToLines_T index_to_lines_2: IndexToLines_T hash_to_index_1, index_to_lines_1 = hash_lineset( lineset1, self.namespace.min_similarity_lines ) hash_to_index_2, index_to_lines_2 = hash_lineset( lineset2, self.namespace.min_similarity_lines ) hash_1: frozenset[LinesChunk] = frozenset(hash_to_index_1.keys()) hash_2: frozenset[LinesChunk] = frozenset(hash_to_index_2.keys()) common_hashes: Iterable[LinesChunk] = sorted( hash_1 & hash_2, key=lambda m: hash_to_index_1[m][0] ) # all_couples is a dict that links the couple of indices in both linesets that mark the beginning of # successive common lines, to the corresponding starting and ending number lines in both files all_couples: CplIndexToCplLines_T = {} for c_hash in sorted(common_hashes, key=operator.attrgetter("_index")): for indices_in_linesets in itertools.product( hash_to_index_1[c_hash], hash_to_index_2[c_hash] ): index_1 = indices_in_linesets[0] index_2 = indices_in_linesets[1] all_couples[LineSetStartCouple(index_1, index_2)] = ( CplSuccessiveLinesLimits( copy.copy(index_to_lines_1[index_1]), copy.copy(index_to_lines_2[index_2]), effective_cmn_lines_nb=self.namespace.min_similarity_lines, ) ) remove_successive(all_couples) for cml_stripped_l, cmn_l in all_couples.items(): start_index_1 = cml_stripped_l.fst_lineset_index start_index_2 = cml_stripped_l.snd_lineset_index nb_common_lines = cmn_l.effective_cmn_lines_nb com = Commonality( cmn_lines_nb=nb_common_lines, fst_lset=lineset1, fst_file_start=cmn_l.first_file.start, fst_file_end=cmn_l.first_file.end, snd_lset=lineset2, snd_file_start=cmn_l.second_file.start, snd_file_end=cmn_l.second_file.end, ) eff_cmn_nb = filter_noncode_lines( lineset1, start_index_1, lineset2, start_index_2, nb_common_lines ) if eff_cmn_nb > self.namespace.min_similarity_lines: yield com
_find_common
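A stripped-down sketch of the same windowed-hash idea, using plain lists and a hypothetical `common_chunks` helper rather than pylint's LineSet/LinesChunk types:

```py
from collections import defaultdict

def common_chunks(lines_a: list[str], lines_b: list[str], min_lines: int = 4):
    """Yield (i, j) start indices where both files share `min_lines` successive lines."""
    def windows(lines):
        table = defaultdict(list)
        for i in range(len(lines) - min_lines + 1):
            table[hash(tuple(lines[i : i + min_lines]))].append(i)
        return table

    hashes_a, hashes_b = windows(lines_a), windows(lines_b)
    for h in hashes_a.keys() & hashes_b.keys():     # hashes present in both files
        for i in hashes_a[h]:
            for j in hashes_b[h]:
                yield i, j

a = ["x = 1", "y = 2", "z = x + y", "print(z)", "done()"]
b = ["setup()", "x = 1", "y = 2", "z = x + y", "print(z)"]
print(sorted(common_chunks(a, b)))                  # [(0, 1)]
```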
pylint
10
pylint/checkers/strings.py
def _get_quote_delimiter(string_token: str) -> str:
    """Returns the quote character used to delimit this token string.

    This function checks whether the token is a well-formed string.

    Args:
        string_token: The token to be parsed.

    Returns:
        A string containing solely the first quote delimiter character in the
        given string.

    Raises:
        ValueError: No quote delimiter characters are present.
    """
/usr/src/app/target_test_cases/failed_tests__get_quote_delimiter.txt
def _get_quote_delimiter(string_token: str) -> str:
    """Returns the quote character used to delimit this token string.

    This function checks whether the token is a well-formed string.

    Args:
        string_token: The token to be parsed.

    Returns:
        A string containing solely the first quote delimiter character in the
        given string.

    Raises:
        ValueError: No quote delimiter characters are present.
    """
    match = QUOTE_DELIMITER_REGEX.match(string_token)
    if not match:
        raise ValueError(f"string token {string_token} is not a well-formed string")
    return match.group(2)
_get_quote_delimiter
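QUOTE_DELIMITER_REGEX itself is not reproduced in this record; the stand-in below is an assumption with the same observable contract (group 2 holds the first quote character after an optional string prefix):

```py
import re

# Hypothetical stand-in for QUOTE_DELIMITER_REGEX: optional string prefix,
# then the first quote character captured in group 2.
QUOTE_DELIMITER_REGEX = re.compile(r"^([rbufRBUF]*)('|\")", re.DOTALL)

def get_quote_delimiter(string_token: str) -> str:
    match = QUOTE_DELIMITER_REGEX.match(string_token)
    if not match:
        raise ValueError(f"string token {string_token} is not a well-formed string")
    return match.group(2)

print(get_quote_delimiter('"hello"'))     # "
print(get_quote_delimiter("rb'data'"))    # '
# get_quote_delimiter("not a string")     # raises ValueError
```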
pylint
11
pylint/checkers/variables.py
def _ignore_class_scope(self, node: nodes.NodeNG) -> bool: """Return True if the node is in a local class scope, as an assignment. Detect if we are in a local class scope, as an assignment. For example, the following is fair game. class A: b = 1 c = lambda b=b: b * b class B: tp = 1 def func(self, arg: tp): ... class C: tp = 2 def func(self, arg=tp): ... class C: class Tp: pass class D(Tp): ... """
/usr/src/app/target_test_cases/failed_tests__ignore_class_scope.txt
def _ignore_class_scope(self, node: nodes.NodeNG) -> bool: """Return True if the node is in a local class scope, as an assignment. Detect if we are in a local class scope, as an assignment. For example, the following is fair game. class A: b = 1 c = lambda b=b: b * b class B: tp = 1 def func(self, arg: tp): ... class C: tp = 2 def func(self, arg=tp): ... class C: class Tp: pass class D(Tp): ... """ name = node.name frame = node.statement().scope() in_annotation_or_default_or_decorator = self._defined_in_function_definition( node, frame ) in_ancestor_list = utils.is_ancestor_name(frame, node) if in_annotation_or_default_or_decorator or in_ancestor_list: frame_locals = frame.parent.scope().locals else: frame_locals = frame.locals return not ( (isinstance(frame, nodes.ClassDef) or in_annotation_or_default_or_decorator) and not self._in_lambda_or_comprehension_body(node, frame) and name in frame_locals )
_ignore_class_scope
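The first case in this docstring relies on a plain Python scoping rule that is easy to check directly, without pylint:

```py
# A default value is evaluated while the class body runs, so it sees `b`;
# the lambda *body* is a new function scope that skips class scope entirely.
class A:
    b = 1
    c = (lambda b=b: b * b)()   # default captured at definition time -> 1
    # c = (lambda: b)()         # would raise NameError: class-level b is invisible

print(A.c)                      # 1
```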
pylint
12
pylint/checkers/typecheck.py
def _infer_from_metaclass_constructor( cls: nodes.ClassDef, func: nodes.FunctionDef ) -> InferenceResult | None: """Try to infer what the given *func* constructor is building. :param astroid.FunctionDef func: A metaclass constructor. Metaclass definitions can be functions, which should accept three arguments, the name of the class, the bases of the class and the attributes. The function could return anything, but usually it should be a proper metaclass. :param astroid.ClassDef cls: The class for which the *func* parameter should generate a metaclass. :returns: The class generated by the function or None, if we couldn't infer it. :rtype: astroid.ClassDef """
/usr/src/app/target_test_cases/failed_tests__infer_from_metaclass_constructor.txt
def _infer_from_metaclass_constructor( cls: nodes.ClassDef, func: nodes.FunctionDef ) -> InferenceResult | None: """Try to infer what the given *func* constructor is building. :param astroid.FunctionDef func: A metaclass constructor. Metaclass definitions can be functions, which should accept three arguments, the name of the class, the bases of the class and the attributes. The function could return anything, but usually it should be a proper metaclass. :param astroid.ClassDef cls: The class for which the *func* parameter should generate a metaclass. :returns: The class generated by the function or None, if we couldn't infer it. :rtype: astroid.ClassDef """ context = astroid.context.InferenceContext() class_bases = nodes.List() class_bases.postinit(elts=cls.bases) attrs = nodes.Dict( lineno=0, col_offset=0, parent=None, end_lineno=0, end_col_offset=0 ) local_names = [(name, values[-1]) for name, values in cls.locals.items()] attrs.postinit(local_names) builder_args = nodes.Tuple() builder_args.postinit([cls.name, class_bases, attrs]) context.callcontext = astroid.context.CallContext(builder_args) try: inferred = next(func.infer_call_result(func, context), None) except astroid.InferenceError: return None return inferred or None
_infer_from_metaclass_constructor
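The construct this inference targets is a function used as a metaclass; a plain-Python example of that shape (names chosen for illustration):

```py
def upper_attrs(name, bases, attrs):
    """A function acting as a metaclass: receives (name, bases, attrs) and returns a class."""
    renamed = {
        (key if key.startswith("__") else key.upper()): value
        for key, value in attrs.items()
    }
    return type(name, bases, renamed)

class Config(metaclass=upper_attrs):
    retries = 3

print(Config.RETRIES)         # 3
print(type(Config) is type)   # True: the constructor returned an ordinary class
```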
pylint
13
pylint/checkers/strings.py
def _is_long_string(string_token: str) -> bool:
    """Is this string token a "longstring" (is it triple-quoted)?

    Long strings are triple-quoted as defined in
    https://docs.python.org/3/reference/lexical_analysis.html#string-and-bytes-literals

    This function only checks characters up through the open quotes. Because
    it's meant to be applied only to tokens that represent string literals, it
    doesn't bother to check for close-quotes (demonstrating that the literal
    is a well-formed string).

    Args:
        string_token: The string token to be parsed.

    Returns:
        A boolean representing whether this token matches a longstring regex.
    """
/usr/src/app/target_test_cases/failed_tests__is_long_string.txt
def _is_long_string(string_token: str) -> bool:
    """Is this string token a "longstring" (is it triple-quoted)?

    Long strings are triple-quoted as defined in
    https://docs.python.org/3/reference/lexical_analysis.html#string-and-bytes-literals

    This function only checks characters up through the open quotes. Because
    it's meant to be applied only to tokens that represent string literals, it
    doesn't bother to check for close-quotes (demonstrating that the literal
    is a well-formed string).

    Args:
        string_token: The string token to be parsed.

    Returns:
        A boolean representing whether this token matches a longstring regex.
    """
    return bool(
        SINGLE_QUOTED_REGEX.match(string_token)
        or DOUBLE_QUOTED_REGEX.match(string_token)
    )
_is_long_string
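As with the previous helper, the two regex constants are not part of this record; the stand-ins below are assumptions that only look at the opening quotes, as the docstring describes:

```py
import re

SINGLE_QUOTED_REGEX = re.compile(r"^([rbufRBUF]*)'''")   # assumed shape
DOUBLE_QUOTED_REGEX = re.compile(r'^([rbufRBUF]*)"""')   # assumed shape

def is_long_string(string_token: str) -> bool:
    return bool(
        SINGLE_QUOTED_REGEX.match(string_token)
        or DOUBLE_QUOTED_REGEX.match(string_token)
    )

print(is_long_string('"""triple"""'))   # True
print(is_long_string("r'''raw'''"))     # True
print(is_long_string("'short'"))        # False
```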
pylint
14
pylint/checkers/base/function_checker.py
def _node_fails_contextmanager_cleanup( node: nodes.FunctionDef, yield_nodes: list[nodes.Yield] ) -> bool: """Check if a node fails contextmanager cleanup. Current checks for a contextmanager: - only if the context manager yields a non-constant value - only if the context manager lacks a finally, or does not catch GeneratorExit - only if some statement follows the yield, some manually cleanup happens :param node: Node to check :type node: nodes.FunctionDef :return: True if fails, False otherwise :param yield_nodes: List of Yield nodes in the function body :type yield_nodes: list[nodes.Yield] :rtype: bool """
/usr/src/app/target_test_cases/failed_tests__node_fails_contextmanager_cleanup.txt
def _node_fails_contextmanager_cleanup( node: nodes.FunctionDef, yield_nodes: list[nodes.Yield] ) -> bool: """Check if a node fails contextmanager cleanup. Current checks for a contextmanager: - only if the context manager yields a non-constant value - only if the context manager lacks a finally, or does not catch GeneratorExit - only if some statement follows the yield, some manually cleanup happens :param node: Node to check :type node: nodes.FunctionDef :return: True if fails, False otherwise :param yield_nodes: List of Yield nodes in the function body :type yield_nodes: list[nodes.Yield] :rtype: bool """ def check_handles_generator_exceptions(try_node: nodes.Try) -> bool: # needs to handle either GeneratorExit, Exception, or bare except for handler in try_node.handlers: if handler.type is None: # handles all exceptions (bare except) return True inferred = utils.safe_infer(handler.type) if inferred and inferred.qname() in { "builtins.GeneratorExit", "builtins.Exception", }: return True return False # if context manager yields a non-constant value, then continue checking if any( yield_node.value is None or isinstance(yield_node.value, nodes.Const) for yield_node in yield_nodes ): return False # Check if yield expression is last statement yield_nodes = list(node.nodes_of_class(nodes.Yield)) if len(yield_nodes) == 1: n = yield_nodes[0].parent while n is not node: if n.next_sibling() is not None: break n = n.parent else: # No next statement found return False # if function body has multiple Try, filter down to the ones that have a yield node try_with_yield_nodes = [ try_node for try_node in node.nodes_of_class(nodes.Try) if any(try_node.nodes_of_class(nodes.Yield)) ] if not try_with_yield_nodes: # no try blocks at all, so checks after this line do not apply return True # if the contextmanager has a finally block, then it is fine if all(try_node.finalbody for try_node in try_with_yield_nodes): return False # if the contextmanager catches GeneratorExit, then it is fine if all( check_handles_generator_exceptions(try_node) for try_node in try_with_yield_nodes ): return False return True
_node_fails_contextmanager_cleanup
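The shape of generator-based context manager this check is after, shown with throwaway `connect`/`_Conn` stubs (both hypothetical) so the difference is runnable:

```py
from contextlib import contextmanager

class _Conn:
    def close(self) -> None:
        print("closed")

def connect(url: str) -> _Conn:
    return _Conn()

@contextmanager
def fragile_session(url):
    conn = connect(url)
    yield conn
    conn.close()            # skipped if the with-body raises

@contextmanager
def robust_session(url):
    conn = connect(url)
    try:
        yield conn
    finally:
        conn.close()        # always runs

try:
    with fragile_session("db://demo"):
        raise ValueError("boom")
except ValueError:
    pass                    # note: "closed" was never printed

with robust_session("db://demo"):
    pass                    # prints "closed"
```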
pylint
15
pylint/extensions/docparams.py
def check_arguments_in_docstring( self, doc: Docstring, arguments_node: astroid.Arguments, warning_node: astroid.NodeNG, accept_no_param_doc: bool | None = None, ) -> None: """Check that all parameters are consistent with the parameters mentioned in the parameter documentation (e.g. the Sphinx tags 'param' and 'type'). * Undocumented parameters except 'self' are noticed. * Undocumented parameter types except for 'self' and the ``*<args>`` and ``**<kwargs>`` parameters are noticed. * Parameters mentioned in the parameter documentation that don't or no longer exist in the function parameter list are noticed. * If the text "For the parameters, see" or "For the other parameters, see" (ignoring additional white-space) is mentioned in the docstring, missing parameter documentation is tolerated. * If there's no Sphinx style, Google style or NumPy style parameter documentation at all, i.e. ``:param`` is never mentioned etc., the checker assumes that the parameters are documented in another format and the absence is tolerated. :param doc: Docstring for the function, method or class. :type doc: :class:`Docstring` :param arguments_node: Arguments node for the function, method or class constructor. :type arguments_node: :class:`astroid.scoped_nodes.Arguments` :param warning_node: The node to assign the warnings to :type warning_node: :class:`astroid.scoped_nodes.Node` :param accept_no_param_doc: Whether to allow no parameters to be documented. If None then this value is read from the configuration. :type accept_no_param_doc: bool or None """
/usr/src/app/target_test_cases/failed_tests_check_arguments_in_docstring.txt
def check_arguments_in_docstring( self, doc: Docstring, arguments_node: astroid.Arguments, warning_node: astroid.NodeNG, accept_no_param_doc: bool | None = None, ) -> None: """Check that all parameters are consistent with the parameters mentioned in the parameter documentation (e.g. the Sphinx tags 'param' and 'type'). * Undocumented parameters except 'self' are noticed. * Undocumented parameter types except for 'self' and the ``*<args>`` and ``**<kwargs>`` parameters are noticed. * Parameters mentioned in the parameter documentation that don't or no longer exist in the function parameter list are noticed. * If the text "For the parameters, see" or "For the other parameters, see" (ignoring additional white-space) is mentioned in the docstring, missing parameter documentation is tolerated. * If there's no Sphinx style, Google style or NumPy style parameter documentation at all, i.e. ``:param`` is never mentioned etc., the checker assumes that the parameters are documented in another format and the absence is tolerated. :param doc: Docstring for the function, method or class. :type doc: :class:`Docstring` :param arguments_node: Arguments node for the function, method or class constructor. :type arguments_node: :class:`astroid.scoped_nodes.Arguments` :param warning_node: The node to assign the warnings to :type warning_node: :class:`astroid.scoped_nodes.Node` :param accept_no_param_doc: Whether to allow no parameters to be documented. If None then this value is read from the configuration. :type accept_no_param_doc: bool or None """ # Tolerate missing param or type declarations if there is a link to # another method carrying the same name. if not doc.doc: return if accept_no_param_doc is None: accept_no_param_doc = self.linter.config.accept_no_param_doc tolerate_missing_params = doc.params_documented_elsewhere() # Collect the function arguments. expected_argument_names = {arg.name for arg in arguments_node.args} expected_argument_names.update( a.name for a in arguments_node.posonlyargs + arguments_node.kwonlyargs ) not_needed_type_in_docstring = self.not_needed_param_in_docstring.copy() expected_but_ignored_argument_names = set() ignored_argument_names = self.linter.config.ignored_argument_names if ignored_argument_names: expected_but_ignored_argument_names = { arg for arg in expected_argument_names if ignored_argument_names.match(arg) } if arguments_node.vararg is not None: expected_argument_names.add(f"*{arguments_node.vararg}") not_needed_type_in_docstring.add(f"*{arguments_node.vararg}") if arguments_node.kwarg is not None: expected_argument_names.add(f"**{arguments_node.kwarg}") not_needed_type_in_docstring.add(f"**{arguments_node.kwarg}") params_with_doc, params_with_type = doc.match_param_docs() # Tolerate no parameter documentation at all. 
if not params_with_doc and not params_with_type and accept_no_param_doc: tolerate_missing_params = True # This is before the update of params_with_type because this must check only # the type documented in a docstring, not the one using pep484 # See #4117 and #4593 self._compare_ignored_args( params_with_type, "useless-type-doc", expected_but_ignored_argument_names, warning_node, ) params_with_type |= utils.args_with_annotation(arguments_node) if not tolerate_missing_params: missing_param_doc = (expected_argument_names - params_with_doc) - ( self.not_needed_param_in_docstring | expected_but_ignored_argument_names ) missing_type_doc = (expected_argument_names - params_with_type) - ( not_needed_type_in_docstring | expected_but_ignored_argument_names ) if ( missing_param_doc == expected_argument_names == missing_type_doc and len(expected_argument_names) != 0 ): self.add_message( "missing-any-param-doc", args=(warning_node.name,), node=warning_node, confidence=HIGH, ) else: self._compare_missing_args( params_with_doc, "missing-param-doc", self.not_needed_param_in_docstring | expected_but_ignored_argument_names, expected_argument_names, warning_node, ) self._compare_missing_args( params_with_type, "missing-type-doc", not_needed_type_in_docstring | expected_but_ignored_argument_names, expected_argument_names, warning_node, ) self._compare_different_args( params_with_doc, "differing-param-doc", self.not_needed_param_in_docstring, expected_argument_names, warning_node, ) self._compare_different_args( params_with_type, "differing-type-doc", not_needed_type_in_docstring, expected_argument_names, warning_node, ) self._compare_ignored_args( params_with_doc, "useless-param-doc", expected_but_ignored_argument_names, warning_node, )
check_arguments_in_docstring
pylint
16
pylint/testutils/utils.py
def create_files(paths: list[str], chroot: str = ".") -> None: """Creates directories and files found in <path>. :param list paths: list of relative paths to files or directories :param str chroot: the root directory in which paths will be created >>> from os.path import isdir, isfile >>> isdir('/tmp/a') False >>> create_files(['a/b/foo.py', 'a/b/c/', 'a/b/c/d/e.py'], '/tmp') >>> isdir('/tmp/a') True >>> isdir('/tmp/a/b/c') True >>> isfile('/tmp/a/b/c/d/e.py') True >>> isfile('/tmp/a/b/foo.py') True """
/usr/src/app/target_test_cases/failed_tests_create_files.txt
def create_files(paths: list[str], chroot: str = ".") -> None: """Creates directories and files found in <path>. :param list paths: list of relative paths to files or directories :param str chroot: the root directory in which paths will be created >>> from os.path import isdir, isfile >>> isdir('/tmp/a') False >>> create_files(['a/b/foo.py', 'a/b/c/', 'a/b/c/d/e.py'], '/tmp') >>> isdir('/tmp/a') True >>> isdir('/tmp/a/b/c') True >>> isfile('/tmp/a/b/c/d/e.py') True >>> isfile('/tmp/a/b/foo.py') True """ dirs, files = set(), set() for path in paths: path = os.path.join(chroot, path) filename = os.path.basename(path) # path is a directory path if not filename: dirs.add(path) # path is a filename path else: dirs.add(os.path.dirname(path)) files.add(path) for dirpath in dirs: if not os.path.isdir(dirpath): os.makedirs(dirpath) for filepath in files: with open(filepath, "w", encoding="utf-8"): pass
create_files
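Typical use in a test, assuming pylint is importable so the helper can be pulled from the module path shown above:

```py
import tempfile
from os.path import isdir, isfile

from pylint.testutils.utils import create_files

with tempfile.TemporaryDirectory() as tmp:
    create_files(["pkg/__init__.py", "pkg/sub/", "pkg/sub/mod.py"], tmp)
    assert isdir(f"{tmp}/pkg/sub")
    assert isfile(f"{tmp}/pkg/__init__.py")
    assert isfile(f"{tmp}/pkg/sub/mod.py")
```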
pylint
17
pylint/checkers/symilar.py
def filter_noncode_lines( ls_1: LineSet, stindex_1: Index, ls_2: LineSet, stindex_2: Index, common_lines_nb: int, ) -> int: """Return the effective number of common lines between lineset1 and lineset2 filtered from non code lines. That is to say the number of common successive stripped lines except those that do not contain code (for example a line with only an ending parenthesis) :param ls_1: first lineset :param stindex_1: first lineset starting index :param ls_2: second lineset :param stindex_2: second lineset starting index :param common_lines_nb: number of common successive stripped lines before being filtered from non code lines :return: the number of common successive stripped lines that contain code """
/usr/src/app/target_test_cases/failed_tests_filter_noncode_lines.txt
def filter_noncode_lines( ls_1: LineSet, stindex_1: Index, ls_2: LineSet, stindex_2: Index, common_lines_nb: int, ) -> int: """Return the effective number of common lines between lineset1 and lineset2 filtered from non code lines. That is to say the number of common successive stripped lines except those that do not contain code (for example a line with only an ending parenthesis) :param ls_1: first lineset :param stindex_1: first lineset starting index :param ls_2: second lineset :param stindex_2: second lineset starting index :param common_lines_nb: number of common successive stripped lines before being filtered from non code lines :return: the number of common successive stripped lines that contain code """ stripped_l1 = [ lspecif.text for lspecif in ls_1.stripped_lines[stindex_1 : stindex_1 + common_lines_nb] if REGEX_FOR_LINES_WITH_CONTENT.match(lspecif.text) ] stripped_l2 = [ lspecif.text for lspecif in ls_2.stripped_lines[stindex_2 : stindex_2 + common_lines_nb] if REGEX_FOR_LINES_WITH_CONTENT.match(lspecif.text) ] return sum(sline_1 == sline_2 for sline_1, sline_2 in zip(stripped_l1, stripped_l2))
filter_noncode_lines
pylint
18
pylint/checkers/utils.py
def get_argument_from_call(
    call_node: nodes.Call, position: int | None = None, keyword: str | None = None
) -> nodes.Name:
    """Returns the specified argument from a function call.

    :param nodes.Call call_node: Node representing a function call to check.
    :param int position: position of the argument.
    :param str keyword: the keyword of the argument.

    :returns: The node representing the argument, None if the argument is not found.
    :rtype: nodes.Name
    :raises ValueError: if both position and keyword are None.
    :raises NoSuchArgumentError: if no argument at the provided position or with
        the provided keyword.
    """
/usr/src/app/target_test_cases/failed_tests_get_argument_from_call.txt
def get_argument_from_call(
    call_node: nodes.Call, position: int | None = None, keyword: str | None = None
) -> nodes.Name:
    """Returns the specified argument from a function call.

    :param nodes.Call call_node: Node representing a function call to check.
    :param int position: position of the argument.
    :param str keyword: the keyword of the argument.

    :returns: The node representing the argument, None if the argument is not found.
    :rtype: nodes.Name
    :raises ValueError: if both position and keyword are None.
    :raises NoSuchArgumentError: if no argument at the provided position or with
        the provided keyword.
    """
    if position is None and keyword is None:
        raise ValueError("Must specify at least one of: position or keyword.")
    if position is not None:
        try:
            return call_node.args[position]
        except IndexError:
            pass
    if keyword and call_node.keywords:
        for arg in call_node.keywords:
            if arg.arg == keyword:
                return arg.value
    raise NoSuchArgumentError
get_argument_from_call
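A small usage sketch with astroid, assuming the helper is importable from the module path shown above:

```py
import astroid

from pylint.checkers.utils import get_argument_from_call

call = astroid.extract_node('open("notes.txt", mode="r")')
print(get_argument_from_call(call, position=0).value)       # notes.txt
print(get_argument_from_call(call, keyword="mode").value)    # r
# get_argument_from_call(call, keyword="encoding")           # raises NoSuchArgumentError
```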
pylint
19
pylint/checkers/utils.py
def get_import_name(importnode: ImportNode, modname: str | None) -> str | None:
    """Get a prepared module name from the given import node.

    In the case of relative imports, this will return the absolute
    qualified module name, which might be useful for debugging. Otherwise,
    the initial module name is returned unchanged.

    :param importnode: node representing import statement.
    :param modname: module name from import statement.

    :returns: absolute qualified module name of the module used in import.
    """
/usr/src/app/target_test_cases/failed_tests_get_import_name.txt
def get_import_name(importnode: ImportNode, modname: str | None) -> str | None:
    """Get a prepared module name from the given import node.

    In the case of relative imports, this will return the absolute
    qualified module name, which might be useful for debugging. Otherwise,
    the initial module name is returned unchanged.

    :param importnode: node representing import statement.
    :param modname: module name from import statement.

    :returns: absolute qualified module name of the module used in import.
    """
    if isinstance(importnode, nodes.ImportFrom) and importnode.level:
        root = importnode.root()
        if isinstance(root, nodes.Module):
            try:
                return root.relative_to_absolute_name(  # type: ignore[no-any-return]
                    modname, level=importnode.level
                )
            except TooManyLevelsError:
                return modname
    return modname
get_import_name
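The relative-to-absolute resolution it performs mirrors the standard library's `importlib.util.resolve_name`, which can be used to illustrate the expected results without building astroid nodes:

```py
from importlib.util import resolve_name

# Resolving a relative module name against the importing package:
print(resolve_name(".helpers", "pkg.sub"))   # pkg.sub.helpers
print(resolve_name("..utils", "pkg.sub"))    # pkg.utils
```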
pylint
20
pylint/checkers/symilar.py
def hash_lineset( lineset: LineSet, min_common_lines: int = DEFAULT_MIN_SIMILARITY_LINE ) -> tuple[HashToIndex_T, IndexToLines_T]: """Return two dicts. The first associates the hash of successive stripped lines of a lineset to the indices of the starting lines. The second dict, associates the index of the starting line in the lineset's stripped lines to the couple [start, end] lines number in the corresponding file. :param lineset: lineset object (i.e the lines in a file) :param min_common_lines: number of successive lines that are used to compute the hash :return: a dict linking hashes to corresponding start index and a dict that links this index to the start and end lines in the file """
/usr/src/app/target_test_cases/failed_tests_hash_lineset.txt
def hash_lineset( lineset: LineSet, min_common_lines: int = DEFAULT_MIN_SIMILARITY_LINE ) -> tuple[HashToIndex_T, IndexToLines_T]: """Return two dicts. The first associates the hash of successive stripped lines of a lineset to the indices of the starting lines. The second dict, associates the index of the starting line in the lineset's stripped lines to the couple [start, end] lines number in the corresponding file. :param lineset: lineset object (i.e the lines in a file) :param min_common_lines: number of successive lines that are used to compute the hash :return: a dict linking hashes to corresponding start index and a dict that links this index to the start and end lines in the file """ hash2index = defaultdict(list) index2lines = {} # Comments, docstring and other specific patterns maybe excluded -> call to stripped_lines # to get only what is desired lines = tuple(x.text for x in lineset.stripped_lines) # Need different iterators on same lines but each one is shifted 1 from the precedent shifted_lines = [iter(lines[i:]) for i in range(min_common_lines)] for i, *succ_lines in enumerate(zip(*shifted_lines)): start_linenumber = LineNumber(lineset.stripped_lines[i].line_number) try: end_linenumber = lineset.stripped_lines[i + min_common_lines].line_number except IndexError: end_linenumber = LineNumber(lineset.stripped_lines[-1].line_number + 1) index = Index(i) index2lines[index] = SuccessiveLinesLimits( start=start_linenumber, end=end_linenumber ) l_c = LinesChunk(lineset.name, index, *succ_lines) hash2index[l_c].append(index) return hash2index, index2lines
hash_lineset
pylint
21
pylint/lint/message_state_handler.py
def is_message_enabled(
    self,
    msg_descr: str,
    line: int | None = None,
    confidence: interfaces.Confidence | None = None,
) -> bool:
    """Is this message enabled for the current file?

    Optionally, is it enabled for this line and confidence level?

    The current file is implicit and mandatory. As a result this function
    can't be cached right now, because the line refers to the currently
    analysed file (self.file_state); if the file changes, the result for the
    same msg_descr/line may need to change as well.

    :param msg_descr: Either the msgid or the symbol for a MessageDefinition
    :param line: The line of the currently analysed file
    :param confidence: The confidence of the message
    """
/usr/src/app/target_test_cases/failed_tests_is_message_enabled.txt
def is_message_enabled(
    self,
    msg_descr: str,
    line: int | None = None,
    confidence: interfaces.Confidence | None = None,
) -> bool:
    """Is this message enabled for the current file?

    Optionally, is it enabled for this line and confidence level?

    The current file is implicit and mandatory. As a result this function
    can't be cached right now, because the line refers to the currently
    analysed file (self.file_state); if the file changes, the result for the
    same msg_descr/line may need to change as well.

    :param msg_descr: Either the msgid or the symbol for a MessageDefinition
    :param line: The line of the currently analysed file
    :param confidence: The confidence of the message
    """
    if confidence and confidence.name not in self.linter.config.confidence:
        return False
    try:
        msgids = self.linter.msgs_store.message_id_store.get_active_msgids(
            msg_descr
        )
    except exceptions.UnknownMessageError:
        # The linter checks for messages that are not registered
        # due to version mismatch, just treat them as message IDs
        # for now.
        msgids = [msg_descr]
    return any(self._is_one_message_enabled(msgid, line) for msgid in msgids)
is_message_enabled
pylint
22
pylint/__init__.py
def modify_sys_path() -> None: """Modify sys path for execution as Python module. Strip out the current working directory from sys.path. Having the working directory in `sys.path` means that `pylint` might inadvertently import user code from modules having the same name as stdlib or pylint's own modules. CPython issue: https://bugs.python.org/issue33053 - Remove the first entry. This will always be either "" or the working directory - Remove the working directory from the second and third entries if PYTHONPATH includes a ":" at the beginning or the end. https://github.com/pylint-dev/pylint/issues/3636 Don't remove it if PYTHONPATH contains the cwd or '.' as the entry will only be added once. - Don't remove the working directory from the rest. It will be included if pylint is installed in an editable configuration (as the last item). https://github.com/pylint-dev/pylint/issues/4161 """
/usr/src/app/target_test_cases/failed_tests_modify_sys_path.txt
def modify_sys_path() -> None: """Modify sys path for execution as Python module. Strip out the current working directory from sys.path. Having the working directory in `sys.path` means that `pylint` might inadvertently import user code from modules having the same name as stdlib or pylint's own modules. CPython issue: https://bugs.python.org/issue33053 - Remove the first entry. This will always be either "" or the working directory - Remove the working directory from the second and third entries if PYTHONPATH includes a ":" at the beginning or the end. https://github.com/pylint-dev/pylint/issues/3636 Don't remove it if PYTHONPATH contains the cwd or '.' as the entry will only be added once. - Don't remove the working directory from the rest. It will be included if pylint is installed in an editable configuration (as the last item). https://github.com/pylint-dev/pylint/issues/4161 """ cwd = os.getcwd() if sys.path[0] in ("", ".", cwd): sys.path.pop(0) env_pythonpath = os.environ.get("PYTHONPATH", "") if env_pythonpath.startswith(":") and env_pythonpath not in (f":{cwd}", ":."): sys.path.pop(0) elif env_pythonpath.endswith(":") and env_pythonpath not in (f"{cwd}:", ".:"): sys.path.pop(1)
modify_sys_path
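A minimal sketch of the first rule only (a simplification that operates on a copy, not the real `sys.path` mutation the function performs):

```py
import os

def strip_leading_cwd(path_entries: list[str]) -> list[str]:
    """Drop a leading "", "." or cwd entry so user modules cannot shadow stdlib/pylint."""
    cwd = os.getcwd()
    if path_entries and path_entries[0] in ("", ".", cwd):
        return path_entries[1:]
    return path_entries

print(strip_leading_cwd(["", "/usr/lib/python3.11", "/venv/lib/site-packages"]))
# ['/usr/lib/python3.11', '/venv/lib/site-packages']
```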
pylint
23
pylint/checkers/symilar.py
def remove_successive(all_couples: CplIndexToCplLines_T) -> None: """Removes all successive entries in the dictionary in argument. :param all_couples: collection that has to be cleaned up from successive entries. The keys are couples of indices that mark the beginning of common entries in both linesets. The values have two parts. The first one is the couple of starting and ending line numbers of common successive lines in the first file. The second part is the same for the second file. For example consider the following dict: >>> all_couples {(11, 34): ([5, 9], [27, 31]), (23, 79): ([15, 19], [45, 49]), (12, 35): ([6, 10], [28, 32])} There are two successive keys (11, 34) and (12, 35). It means there are two consecutive similar chunks of lines in both files. Thus remove last entry and update the last line numbers in the first entry >>> remove_successive(all_couples) >>> all_couples {(11, 34): ([5, 10], [27, 32]), (23, 79): ([15, 19], [45, 49])} """
/usr/src/app/target_test_cases/failed_tests_remove_successive.txt
def remove_successive(all_couples: CplIndexToCplLines_T) -> None: """Removes all successive entries in the dictionary in argument. :param all_couples: collection that has to be cleaned up from successive entries. The keys are couples of indices that mark the beginning of common entries in both linesets. The values have two parts. The first one is the couple of starting and ending line numbers of common successive lines in the first file. The second part is the same for the second file. For example consider the following dict: >>> all_couples {(11, 34): ([5, 9], [27, 31]), (23, 79): ([15, 19], [45, 49]), (12, 35): ([6, 10], [28, 32])} There are two successive keys (11, 34) and (12, 35). It means there are two consecutive similar chunks of lines in both files. Thus remove last entry and update the last line numbers in the first entry >>> remove_successive(all_couples) >>> all_couples {(11, 34): ([5, 10], [27, 32]), (23, 79): ([15, 19], [45, 49])} """ couple: LineSetStartCouple for couple in tuple(all_couples.keys()): to_remove = [] test = couple.increment(Index(1)) while test in all_couples: all_couples[couple].first_file.end = all_couples[test].first_file.end all_couples[couple].second_file.end = all_couples[test].second_file.end all_couples[couple].effective_cmn_lines_nb += 1 to_remove.append(test) test = test.increment(Index(1)) for target in to_remove: try: all_couples.pop(target) except KeyError: pass
remove_successive
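A simplified, self-contained version of the same merge, using plain tuples and lists in place of pylint's LineSetStartCouple / SuccessiveLinesLimits types, reproducing the docstring's example:

```py
def merge_successive(couples: dict[tuple[int, int], list[list[int]]]) -> None:
    for key in list(couples):
        if key not in couples:          # already merged into a predecessor
            continue
        nxt = (key[0] + 1, key[1] + 1)
        while nxt in couples:
            couples[key][0][1] = couples[nxt][0][1]   # extend end line in file 1
            couples[key][1][1] = couples[nxt][1][1]   # extend end line in file 2
            couples.pop(nxt)
            nxt = (nxt[0] + 1, nxt[1] + 1)

all_couples = {
    (11, 34): [[5, 9], [27, 31]],
    (23, 79): [[15, 19], [45, 49]],
    (12, 35): [[6, 10], [28, 32]],
}
merge_successive(all_couples)
print(all_couples)
# {(11, 34): [[5, 10], [27, 32]], (23, 79): [[15, 19], [45, 49]]}
```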
pylint
24
pylint/lint/pylinter.py
def should_analyze_file(modname: str, path: str, is_argument: bool = False) -> bool:
    """Returns whether a module should be checked.

    This implementation returns True for all python source files (.py and .pyi),
    indicating that all files should be linted. Subclasses may override this
    method to indicate that modules satisfying certain conditions should not
    be linted.

    :param str modname: The name of the module to be checked.
    :param str path: The full path to the source code of the module.
    :param bool is_argument: Whether the file is an argument to pylint or not.
        Files which respect this property are always checked, since the user
        requested it explicitly.

    :returns: True if the module should be checked.
    """
/usr/src/app/target_test_cases/failed_tests_should_analyze_file.txt
def should_analyze_file(modname: str, path: str, is_argument: bool = False) -> bool:
    """Returns whether a module should be checked.

    This implementation returns True for all python source files (.py and .pyi),
    indicating that all files should be linted. Subclasses may override this
    method to indicate that modules satisfying certain conditions should not
    be linted.

    :param str modname: The name of the module to be checked.
    :param str path: The full path to the source code of the module.
    :param bool is_argument: Whether the file is an argument to pylint or not.
        Files which respect this property are always checked, since the user
        requested it explicitly.

    :returns: True if the module should be checked.
    """
    if is_argument:
        return True
    return path.endswith((".py", ".pyi"))
should_analyze_file
pylint
25
pylint/checkers/symilar.py
def stripped_lines( lines: Iterable[str], ignore_comments: bool, ignore_docstrings: bool, ignore_imports: bool, ignore_signatures: bool, line_enabled_callback: Callable[[str, int], bool] | None = None, ) -> list[LineSpecifs]: """Return tuples of line/line number/line type with leading/trailing white-space and any ignored code features removed. :param lines: a collection of lines :param ignore_comments: if true, any comment in the lines collection is removed from the result :param ignore_docstrings: if true, any line that is a docstring is removed from the result :param ignore_imports: if true, any line that is an import is removed from the result :param ignore_signatures: if true, any line that is part of a function signature is removed from the result :param line_enabled_callback: If called with "R0801" and a line number, a return value of False will disregard the line :return: the collection of line/line number/line type tuples """
/usr/src/app/target_test_cases/failed_tests_stripped_lines.txt
def stripped_lines( lines: Iterable[str], ignore_comments: bool, ignore_docstrings: bool, ignore_imports: bool, ignore_signatures: bool, line_enabled_callback: Callable[[str, int], bool] | None = None, ) -> list[LineSpecifs]: """Return tuples of line/line number/line type with leading/trailing white-space and any ignored code features removed. :param lines: a collection of lines :param ignore_comments: if true, any comment in the lines collection is removed from the result :param ignore_docstrings: if true, any line that is a docstring is removed from the result :param ignore_imports: if true, any line that is an import is removed from the result :param ignore_signatures: if true, any line that is part of a function signature is removed from the result :param line_enabled_callback: If called with "R0801" and a line number, a return value of False will disregard the line :return: the collection of line/line number/line type tuples """ ignore_lines: set[int] = set() if ignore_imports or ignore_signatures: tree = astroid.parse("".join(lines)) if ignore_imports: ignore_lines.update( chain.from_iterable( range(node.lineno, (node.end_lineno or node.lineno) + 1) for node in tree.nodes_of_class((nodes.Import, nodes.ImportFrom)) ) ) if ignore_signatures: def _get_functions( functions: list[nodes.NodeNG], tree: nodes.NodeNG ) -> list[nodes.NodeNG]: """Recursively get all functions including nested in the classes from the. tree. """ for node in tree.body: if isinstance(node, (nodes.FunctionDef, nodes.AsyncFunctionDef)): functions.append(node) if isinstance( node, (nodes.ClassDef, nodes.FunctionDef, nodes.AsyncFunctionDef), ): _get_functions(functions, node) return functions functions = _get_functions([], tree) ignore_lines.update( chain.from_iterable( range( func.lineno, func.body[0].lineno if func.body else func.tolineno + 1, ) for func in functions ) ) strippedlines = [] docstring = None for lineno, line in enumerate(lines, start=1): if line_enabled_callback is not None and not line_enabled_callback( "R0801", lineno ): continue line = line.strip() if ignore_docstrings: if not docstring: if line.startswith(('"""', "'''")): docstring = line[:3] line = line[3:] elif line.startswith(('r"""', "r'''")): docstring = line[1:4] line = line[4:] if docstring: if line.endswith(docstring): docstring = None line = "" if ignore_comments: line = line.split("#", 1)[0].strip() if lineno in ignore_lines: line = "" if line: strippedlines.append( LineSpecifs(text=line, line_number=LineNumber(lineno - 1)) ) return strippedlines
stripped_lines
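A quick check of how the flags interact, assuming the module is importable from the path shown above; the expected output is derived from the code in this record:

```py
from pylint.checkers.symilar import stripped_lines

code = [
    "import os\n",
    "\n",
    "def f():\n",
    "    # compute\n",
    "    return os.getcwd()\n",
]
for spec in stripped_lines(
    code,
    ignore_comments=True,
    ignore_docstrings=True,
    ignore_imports=True,
    ignore_signatures=False,
):
    print(spec.line_number, spec.text)
# 2 def f():
# 4 return os.getcwd()
```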
sympy
0
sympy/physics/continuum_mechanics/beam.py
def apply_load(self, value, start, order, end=None): """ This method adds up the loads given to a particular beam object. Parameters ========== value : Sympifyable The value inserted should have the units [Force/(Distance**(n+1)] where n is the order of applied load. Units for applied loads: - For moments, unit = kN*m - For point loads, unit = kN - For constant distributed load, unit = kN/m - For ramp loads, unit = kN/m/m - For parabolic ramp loads, unit = kN/m/m/m - ... so on. start : Sympifyable The starting point of the applied load. For point moments and point forces this is the location of application. order : Integer The order of the applied load. - For moments, order = -2 - For point loads, order =-1 - For constant distributed load, order = 0 - For ramp loads, order = 1 - For parabolic ramp loads, order = 2 - ... so on. end : Sympifyable, optional An optional argument that can be used if the load has an end point within the length of the beam. Examples ======== There is a beam of length 4 meters. A moment of magnitude 3 Nm is applied in the clockwise direction at the starting point of the beam. A point load of magnitude 4 N is applied from the top of the beam at 2 meters from the starting point and a parabolic ramp load of magnitude 2 N/m is applied below the beam starting from 2 meters to 3 meters away from the starting point of the beam. >>> from sympy.physics.continuum_mechanics.beam import Beam >>> from sympy import symbols >>> E, I = symbols('E, I') >>> b = Beam(4, E, I) >>> b.apply_load(-3, 0, -2) >>> b.apply_load(4, 2, -1) >>> b.apply_load(-2, 2, 2, end=3) >>> b.load -3*SingularityFunction(x, 0, -2) + 4*SingularityFunction(x, 2, -1) - 2*SingularityFunction(x, 2, 2) + 2*SingularityFunction(x, 3, 0) + 4*SingularityFunction(x, 3, 1) + 2*SingularityFunction(x, 3, 2) """
/usr/src/app/target_test_cases/failed_tests_Beam.apply_load.txt
def apply_load(self, value, start, order, end=None): """ This method adds up the loads given to a particular beam object. Parameters ========== value : Sympifyable The value inserted should have the units [Force/(Distance**(n+1)] where n is the order of applied load. Units for applied loads: - For moments, unit = kN*m - For point loads, unit = kN - For constant distributed load, unit = kN/m - For ramp loads, unit = kN/m/m - For parabolic ramp loads, unit = kN/m/m/m - ... so on. start : Sympifyable The starting point of the applied load. For point moments and point forces this is the location of application. order : Integer The order of the applied load. - For moments, order = -2 - For point loads, order =-1 - For constant distributed load, order = 0 - For ramp loads, order = 1 - For parabolic ramp loads, order = 2 - ... so on. end : Sympifyable, optional An optional argument that can be used if the load has an end point within the length of the beam. Examples ======== There is a beam of length 4 meters. A moment of magnitude 3 Nm is applied in the clockwise direction at the starting point of the beam. A point load of magnitude 4 N is applied from the top of the beam at 2 meters from the starting point and a parabolic ramp load of magnitude 2 N/m is applied below the beam starting from 2 meters to 3 meters away from the starting point of the beam. >>> from sympy.physics.continuum_mechanics.beam import Beam >>> from sympy import symbols >>> E, I = symbols('E, I') >>> b = Beam(4, E, I) >>> b.apply_load(-3, 0, -2) >>> b.apply_load(4, 2, -1) >>> b.apply_load(-2, 2, 2, end=3) >>> b.load -3*SingularityFunction(x, 0, -2) + 4*SingularityFunction(x, 2, -1) - 2*SingularityFunction(x, 2, 2) + 2*SingularityFunction(x, 3, 0) + 4*SingularityFunction(x, 3, 1) + 2*SingularityFunction(x, 3, 2) """ x = self.variable value = sympify(value) start = sympify(start) order = sympify(order) self._applied_loads.append((value, start, order, end)) self._load += value*SingularityFunction(x, start, order) self._original_load += value*SingularityFunction(x, start, order) if end: # load has an end point within the length of the beam. self._handle_end(x, value, start, order, end, type="apply")
Beam.apply_load
sympy
1
sympy/physics/continuum_mechanics/beam.py
def apply_rotation_hinge(self, loc): """ This method applies a rotation hinge at a single location on the beam. Parameters ---------- loc : Sympifyable Location of point at which hinge is applied. Returns ======= Symbol The unknown rotation jump multiplied by the elastic modulus and second moment as a symbol. Examples ======== There is a beam of length 15 meters. Pin supports are placed at distances of 0 and 10 meters. There is a fixed support at the end. There are two rotation hinges in the structure, one at 5 meters and one at 10 meters. A pointload of magnitude 10 kN is applied on the hinge at 5 meters. A distributed load of 5 kN works on the structure from 10 meters to the end. Using the sign convention of upward forces and clockwise moment being positive. >>> from sympy.physics.continuum_mechanics.beam import Beam >>> from sympy import Symbol >>> E = Symbol('E') >>> I = Symbol('I') >>> b = Beam(15, E, I) >>> r0 = b.apply_support(0, type='pin') >>> r10 = b.apply_support(10, type='pin') >>> r15, m15 = b.apply_support(15, type='fixed') >>> p5 = b.apply_rotation_hinge(5) >>> p12 = b.apply_rotation_hinge(12) >>> b.apply_load(-10, 5, -1) >>> b.apply_load(-5, 10, 0, 15) >>> b.solve_for_reaction_loads(r0, r10, r15, m15) >>> b.reaction_loads {M_15: -75/2, R_0: 0, R_10: 40, R_15: -5} >>> b.rotation_jumps {P_12: -1875/(16*E*I), P_5: 9625/(24*E*I)} >>> b.rotation_jumps[p12] -1875/(16*E*I) >>> b.bending_moment() -9625*SingularityFunction(x, 5, -1)/24 + 10*SingularityFunction(x, 5, 1) - 40*SingularityFunction(x, 10, 1) + 5*SingularityFunction(x, 10, 2)/2 + 1875*SingularityFunction(x, 12, -1)/16 + 75*SingularityFunction(x, 15, 0)/2 + 5*SingularityFunction(x, 15, 1) - 5*SingularityFunction(x, 15, 2)/2 """
/usr/src/app/target_test_cases/failed_tests_Beam.apply_rotation_hinge.txt
def apply_rotation_hinge(self, loc): """ This method applies a rotation hinge at a single location on the beam. Parameters ---------- loc : Sympifyable Location of point at which hinge is applied. Returns ======= Symbol The unknown rotation jump multiplied by the elastic modulus and second moment as a symbol. Examples ======== There is a beam of length 15 meters. Pin supports are placed at distances of 0 and 10 meters. There is a fixed support at the end. There are two rotation hinges in the structure, one at 5 meters and one at 10 meters. A pointload of magnitude 10 kN is applied on the hinge at 5 meters. A distributed load of 5 kN works on the structure from 10 meters to the end. Using the sign convention of upward forces and clockwise moment being positive. >>> from sympy.physics.continuum_mechanics.beam import Beam >>> from sympy import Symbol >>> E = Symbol('E') >>> I = Symbol('I') >>> b = Beam(15, E, I) >>> r0 = b.apply_support(0, type='pin') >>> r10 = b.apply_support(10, type='pin') >>> r15, m15 = b.apply_support(15, type='fixed') >>> p5 = b.apply_rotation_hinge(5) >>> p12 = b.apply_rotation_hinge(12) >>> b.apply_load(-10, 5, -1) >>> b.apply_load(-5, 10, 0, 15) >>> b.solve_for_reaction_loads(r0, r10, r15, m15) >>> b.reaction_loads {M_15: -75/2, R_0: 0, R_10: 40, R_15: -5} >>> b.rotation_jumps {P_12: -1875/(16*E*I), P_5: 9625/(24*E*I)} >>> b.rotation_jumps[p12] -1875/(16*E*I) >>> b.bending_moment() -9625*SingularityFunction(x, 5, -1)/24 + 10*SingularityFunction(x, 5, 1) - 40*SingularityFunction(x, 10, 1) + 5*SingularityFunction(x, 10, 2)/2 + 1875*SingularityFunction(x, 12, -1)/16 + 75*SingularityFunction(x, 15, 0)/2 + 5*SingularityFunction(x, 15, 1) - 5*SingularityFunction(x, 15, 2)/2 """ loc = sympify(loc) E = self.elastic_modulus I = self._get_I(loc) rotation_jump = Symbol('P_'+str(loc)) self._applied_rotation_hinges.append(loc) self._rotation_hinge_symbols.append(rotation_jump) self.apply_load(E * I * rotation_jump, loc, -3) self.bc_bending_moment.append((loc, 0)) return rotation_jump
Beam.apply_rotation_hinge
sympy
2
sympy/physics/continuum_mechanics/beam.py
def apply_support(self, loc, type="fixed"): """ This method applies support to a particular beam object and returns the symbol of the unknown reaction load(s). Parameters ========== loc : Sympifyable Location of point at which support is applied. type : String Determines type of Beam support applied. To apply support structure with - zero degree of freedom, type = "fixed" - one degree of freedom, type = "pin" - two degrees of freedom, type = "roller" Returns ======= Symbol or tuple of Symbol The unknown reaction load as a symbol. - Symbol(reaction_force) if type = "pin" or "roller" - Symbol(reaction_force), Symbol(reaction_moment) if type = "fixed" Examples ======== There is a beam of length 20 meters. A moment of magnitude 100 Nm is applied in the clockwise direction at the end of the beam. A pointload of magnitude 8 N is applied from the top of the beam at a distance of 10 meters. There is one fixed support at the start of the beam and a roller at the end. Using the sign convention of upward forces and clockwise moment being positive. >>> from sympy.physics.continuum_mechanics.beam import Beam >>> from sympy import symbols >>> E, I = symbols('E, I') >>> b = Beam(20, E, I) >>> p0, m0 = b.apply_support(0, 'fixed') >>> p1 = b.apply_support(20, 'roller') >>> b.apply_load(-8, 10, -1) >>> b.apply_load(100, 20, -2) >>> b.solve_for_reaction_loads(p0, m0, p1) >>> b.reaction_loads {M_0: 20, R_0: -2, R_20: 10} >>> b.reaction_loads[p0] -2 >>> b.load 20*SingularityFunction(x, 0, -2) - 2*SingularityFunction(x, 0, -1) - 8*SingularityFunction(x, 10, -1) + 100*SingularityFunction(x, 20, -2) + 10*SingularityFunction(x, 20, -1) """
/usr/src/app/target_test_cases/failed_tests_Beam.apply_support.txt
def apply_support(self, loc, type="fixed"): """ This method applies support to a particular beam object and returns the symbol of the unknown reaction load(s). Parameters ========== loc : Sympifyable Location of point at which support is applied. type : String Determines type of Beam support applied. To apply support structure with - zero degree of freedom, type = "fixed" - one degree of freedom, type = "pin" - two degrees of freedom, type = "roller" Returns ======= Symbol or tuple of Symbol The unknown reaction load as a symbol. - Symbol(reaction_force) if type = "pin" or "roller" - Symbol(reaction_force), Symbol(reaction_moment) if type = "fixed" Examples ======== There is a beam of length 20 meters. A moment of magnitude 100 Nm is applied in the clockwise direction at the end of the beam. A pointload of magnitude 8 N is applied from the top of the beam at a distance of 10 meters. There is one fixed support at the start of the beam and a roller at the end. Using the sign convention of upward forces and clockwise moment being positive. >>> from sympy.physics.continuum_mechanics.beam import Beam >>> from sympy import symbols >>> E, I = symbols('E, I') >>> b = Beam(20, E, I) >>> p0, m0 = b.apply_support(0, 'fixed') >>> p1 = b.apply_support(20, 'roller') >>> b.apply_load(-8, 10, -1) >>> b.apply_load(100, 20, -2) >>> b.solve_for_reaction_loads(p0, m0, p1) >>> b.reaction_loads {M_0: 20, R_0: -2, R_20: 10} >>> b.reaction_loads[p0] -2 >>> b.load 20*SingularityFunction(x, 0, -2) - 2*SingularityFunction(x, 0, -1) - 8*SingularityFunction(x, 10, -1) + 100*SingularityFunction(x, 20, -2) + 10*SingularityFunction(x, 20, -1) """ loc = sympify(loc) self._applied_supports.append((loc, type)) if type in ("pin", "roller"): reaction_load = Symbol('R_'+str(loc)) self.apply_load(reaction_load, loc, -1) self.bc_deflection.append((loc, 0)) else: reaction_load = Symbol('R_'+str(loc)) reaction_moment = Symbol('M_'+str(loc)) self.apply_load(reaction_load, loc, -1) self.apply_load(reaction_moment, loc, -2) self.bc_deflection.append((loc, 0)) self.bc_slope.append((loc, 0)) self._support_as_loads.append((reaction_moment, loc, -2, None)) self._support_as_loads.append((reaction_load, loc, -1, None)) if type in ("pin", "roller"): return reaction_load else: return reaction_load, reaction_moment
Beam.apply_support
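Usage sketch (not part of the original sympy examples): a simply supported beam built with the `apply_support` API documented above, assuming the method returns the unknown reaction symbols as described. The beam length and load values are arbitrary illustration choices; the symmetric reactions are what static equilibrium predicts.

```py
from sympy import symbols
from sympy.physics.continuum_mechanics.beam import Beam

E, I = symbols('E, I')
b = Beam(10, E, I)                  # beam of length 10
p0 = b.apply_support(0, 'pin')      # returns the unknown reaction symbol R_0
p1 = b.apply_support(10, 'roller')  # returns R_10
b.apply_load(-6, 5, -1)             # 6 N downward point load at midspan
b.solve_for_reaction_loads(p0, p1)
print(b.reaction_loads)             # by symmetry, each reaction carries 3 N upward
```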
sympy
3
sympy/matrices/expressions/blockmatrix.py
def schur(self, mat = 'A', generalized = False): """Return the Schur Complement of the 2x2 BlockMatrix Parameters ========== mat : String, optional The matrix with respect to which the Schur Complement is calculated. 'A' is used by default generalized : bool, optional If True, returns the generalized Schur Component which uses Moore-Penrose Inverse Examples ======== >>> from sympy import symbols, MatrixSymbol, BlockMatrix >>> m, n = symbols('m n') >>> A = MatrixSymbol('A', n, n) >>> B = MatrixSymbol('B', n, m) >>> C = MatrixSymbol('C', m, n) >>> D = MatrixSymbol('D', m, m) >>> X = BlockMatrix([[A, B], [C, D]]) The default Schur Complement is evaluated with "A" >>> X.schur() -C*A**(-1)*B + D >>> X.schur('D') A - B*D**(-1)*C Schur complement with non-invertible matrices is not defined. Instead, the generalized Schur complement can be calculated which uses the Moore-Penrose Inverse. To achieve this, `generalized` must be set to `True` >>> X.schur('B', generalized=True) C - D*(B.T*B)**(-1)*B.T*A >>> X.schur('C', generalized=True) -A*(C.T*C)**(-1)*C.T*D + B Returns ======= M : Matrix The Schur Complement Matrix Raises ====== ShapeError If the block matrix is not a 2x2 matrix NonInvertibleMatrixError If given matrix is non-invertible References ========== .. [1] Wikipedia Article on Schur Component : https://en.wikipedia.org/wiki/Schur_complement See Also ======== sympy.matrices.matrixbase.MatrixBase.pinv """
/usr/src/app/target_test_cases/failed_tests_BlockMatrix.schur.txt
def schur(self, mat = 'A', generalized = False): """Return the Schur Complement of the 2x2 BlockMatrix Parameters ========== mat : String, optional The matrix with respect to which the Schur Complement is calculated. 'A' is used by default generalized : bool, optional If True, returns the generalized Schur Component which uses Moore-Penrose Inverse Examples ======== >>> from sympy import symbols, MatrixSymbol, BlockMatrix >>> m, n = symbols('m n') >>> A = MatrixSymbol('A', n, n) >>> B = MatrixSymbol('B', n, m) >>> C = MatrixSymbol('C', m, n) >>> D = MatrixSymbol('D', m, m) >>> X = BlockMatrix([[A, B], [C, D]]) The default Schur Complement is evaluated with "A" >>> X.schur() -C*A**(-1)*B + D >>> X.schur('D') A - B*D**(-1)*C Schur complement with non-invertible matrices is not defined. Instead, the generalized Schur complement can be calculated which uses the Moore-Penrose Inverse. To achieve this, `generalized` must be set to `True` >>> X.schur('B', generalized=True) C - D*(B.T*B)**(-1)*B.T*A >>> X.schur('C', generalized=True) -A*(C.T*C)**(-1)*C.T*D + B Returns ======= M : Matrix The Schur Complement Matrix Raises ====== ShapeError If the block matrix is not a 2x2 matrix NonInvertibleMatrixError If given matrix is non-invertible References ========== .. [1] Wikipedia Article on Schur Component : https://en.wikipedia.org/wiki/Schur_complement See Also ======== sympy.matrices.matrixbase.MatrixBase.pinv """ if self.blockshape == (2, 2): [[A, B], [C, D]] = self.blocks.tolist() d={'A' : A, 'B' : B, 'C' : C, 'D' : D} try: inv = (d[mat].T*d[mat]).inv()*d[mat].T if generalized else d[mat].inv() if mat == 'A': return D - C * inv * B elif mat == 'B': return C - D * inv * A elif mat == 'C': return B - A * inv * D elif mat == 'D': return A - B * inv * C #For matrices where no sub-matrix is square return self except NonInvertibleMatrixError: raise NonInvertibleMatrixError('The given matrix is not invertible. Please set generalized=True \ to compute the generalized Schur Complement which uses Moore-Penrose Inverse') else: raise ShapeError('Schur Complement can only be calculated for 2x2 block matrices')
BlockMatrix.schur
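A small numeric check as an illustrative sketch (the block values are made up): for an invertible top-left block, the determinant of the full matrix factors as det(A) times the determinant of the Schur complement of A.

```py
from sympy import Matrix, BlockMatrix

A = Matrix([[2, 0], [0, 2]])
B = Matrix([[1], [0]])
C = Matrix([[0, 1]])
D = Matrix([[3]])
X = BlockMatrix([[A, B], [C, D]])

S = X.schur('A')                    # D - C*A**(-1)*B
# det(X) == det(A) * det(S) when A is invertible
assert X.as_explicit().det() == A.det() * Matrix(S).det()
```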
sympy
4
sympy/physics/mechanics/body.py
def apply_force(self, force, point=None, reaction_body=None, reaction_point=None): """Add force to the body(s). Explanation =========== Applies the force on self or equal and opposite forces on self and other body if both are given on the desired point on the bodies. The force applied on other body is taken opposite of self, i.e, -force. Parameters ========== force: Vector The force to be applied. point: Point, optional The point on self on which force is applied. By default self's masscenter. reaction_body: Body, optional Second body on which equal and opposite force is to be applied. reaction_point : Point, optional The point on other body on which equal and opposite force is applied. By default masscenter of other body. Example ======= As Body has been deprecated, the following examples are for illustrative purposes only. The functionality of Body is fully captured by :class:`~.RigidBody` and :class:`~.Particle`. To ignore the deprecation warning we can use the ignore_warnings context manager. >>> from sympy.utilities.exceptions import ignore_warnings >>> from sympy import symbols >>> from sympy.physics.mechanics import Body, Point, dynamicsymbols >>> m, g = symbols('m g') >>> with ignore_warnings(DeprecationWarning): ... B = Body('B') >>> force1 = m*g*B.z >>> B.apply_force(force1) #Applying force on B's masscenter >>> B.loads [(B_masscenter, g*m*B_frame.z)] We can also remove some part of force from any point on the body by adding the opposite force to the body on that point. >>> f1, f2 = dynamicsymbols('f1 f2') >>> P = Point('P') #Considering point P on body B >>> B.apply_force(f1*B.x + f2*B.y, P) >>> B.loads [(B_masscenter, g*m*B_frame.z), (P, f1(t)*B_frame.x + f2(t)*B_frame.y)] Let's remove f1 from point P on body B. >>> B.apply_force(-f1*B.x, P) >>> B.loads [(B_masscenter, g*m*B_frame.z), (P, f2(t)*B_frame.y)] To further demonstrate the use of ``apply_force`` attribute, consider two bodies connected through a spring. >>> from sympy.physics.mechanics import Body, dynamicsymbols >>> with ignore_warnings(DeprecationWarning): ... N = Body('N') #Newtonion Frame >>> x = dynamicsymbols('x') >>> with ignore_warnings(DeprecationWarning): ... B1 = Body('B1') ... B2 = Body('B2') >>> spring_force = x*N.x Now let's apply equal and opposite spring force to the bodies. >>> P1 = Point('P1') >>> P2 = Point('P2') >>> B1.apply_force(spring_force, point=P1, reaction_body=B2, reaction_point=P2) We can check the loads(forces) applied to bodies now. >>> B1.loads [(P1, x(t)*N_frame.x)] >>> B2.loads [(P2, - x(t)*N_frame.x)] Notes ===== If a new force is applied to a body on a point which already has some force applied on it, then the new force is added to the already applied force on that point. """
/usr/src/app/target_test_cases/failed_tests_Body.apply_force.txt
def apply_force(self, force, point=None, reaction_body=None, reaction_point=None): """Add force to the body(s). Explanation =========== Applies the force on self or equal and opposite forces on self and other body if both are given on the desired point on the bodies. The force applied on other body is taken opposite of self, i.e, -force. Parameters ========== force: Vector The force to be applied. point: Point, optional The point on self on which force is applied. By default self's masscenter. reaction_body: Body, optional Second body on which equal and opposite force is to be applied. reaction_point : Point, optional The point on other body on which equal and opposite force is applied. By default masscenter of other body. Example ======= As Body has been deprecated, the following examples are for illustrative purposes only. The functionality of Body is fully captured by :class:`~.RigidBody` and :class:`~.Particle`. To ignore the deprecation warning we can use the ignore_warnings context manager. >>> from sympy.utilities.exceptions import ignore_warnings >>> from sympy import symbols >>> from sympy.physics.mechanics import Body, Point, dynamicsymbols >>> m, g = symbols('m g') >>> with ignore_warnings(DeprecationWarning): ... B = Body('B') >>> force1 = m*g*B.z >>> B.apply_force(force1) #Applying force on B's masscenter >>> B.loads [(B_masscenter, g*m*B_frame.z)] We can also remove some part of force from any point on the body by adding the opposite force to the body on that point. >>> f1, f2 = dynamicsymbols('f1 f2') >>> P = Point('P') #Considering point P on body B >>> B.apply_force(f1*B.x + f2*B.y, P) >>> B.loads [(B_masscenter, g*m*B_frame.z), (P, f1(t)*B_frame.x + f2(t)*B_frame.y)] Let's remove f1 from point P on body B. >>> B.apply_force(-f1*B.x, P) >>> B.loads [(B_masscenter, g*m*B_frame.z), (P, f2(t)*B_frame.y)] To further demonstrate the use of ``apply_force`` attribute, consider two bodies connected through a spring. >>> from sympy.physics.mechanics import Body, dynamicsymbols >>> with ignore_warnings(DeprecationWarning): ... N = Body('N') #Newtonion Frame >>> x = dynamicsymbols('x') >>> with ignore_warnings(DeprecationWarning): ... B1 = Body('B1') ... B2 = Body('B2') >>> spring_force = x*N.x Now let's apply equal and opposite spring force to the bodies. >>> P1 = Point('P1') >>> P2 = Point('P2') >>> B1.apply_force(spring_force, point=P1, reaction_body=B2, reaction_point=P2) We can check the loads(forces) applied to bodies now. >>> B1.loads [(P1, x(t)*N_frame.x)] >>> B2.loads [(P2, - x(t)*N_frame.x)] Notes ===== If a new force is applied to a body on a point which already has some force applied on it, then the new force is added to the already applied force on that point. """ if not isinstance(point, Point): if point is None: point = self.masscenter # masscenter else: raise TypeError("Force must be applied to a point on the body.") if not isinstance(force, Vector): raise TypeError("Force must be a vector.") if reaction_body is not None: reaction_body.apply_force(-force, point=reaction_point) for load in self._loads: if point in load: force += load[1] self._loads.remove(load) break self._loads.append((point, force))
Body.apply_force
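A minimal sketch of the equal-and-opposite bookkeeping, using the deprecated `Body` class exactly as the docstring does (hence the `ignore_warnings` wrapper); the body names and the applied force are arbitrary, and the printed loads are only described loosely in the comments.

```py
from sympy import symbols
from sympy.physics.mechanics import Body
from sympy.utilities.exceptions import ignore_warnings

m, g = symbols('m g')
with ignore_warnings(DeprecationWarning):
    A = Body('A')
    B = Body('B')

# apply m*g along A.z at A's masscenter and the opposite force at B's masscenter
A.apply_force(m*g*A.z, reaction_body=B)
print(A.loads)   # one load at A_masscenter
print(B.loads)   # the negated load at B_masscenter
```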
sympy
5
sympy/physics/mechanics/body.py
def apply_torque(self, torque, reaction_body=None): """Add torque to the body(s). Explanation =========== Applies the torque on self or equal and opposite torques on self and other body if both are given. The torque applied on other body is taken opposite of self, i.e, -torque. Parameters ========== torque: Vector The torque to be applied. reaction_body: Body, optional Second body on which equal and opposite torque is to be applied. Example ======= As Body has been deprecated, the following examples are for illustrative purposes only. The functionality of Body is fully captured by :class:`~.RigidBody` and :class:`~.Particle`. To ignore the deprecation warning we can use the ignore_warnings context manager. >>> from sympy.utilities.exceptions import ignore_warnings >>> from sympy import symbols >>> from sympy.physics.mechanics import Body, dynamicsymbols >>> t = symbols('t') >>> with ignore_warnings(DeprecationWarning): ... B = Body('B') >>> torque1 = t*B.z >>> B.apply_torque(torque1) >>> B.loads [(B_frame, t*B_frame.z)] We can also remove some part of torque from the body by adding the opposite torque to the body. >>> t1, t2 = dynamicsymbols('t1 t2') >>> B.apply_torque(t1*B.x + t2*B.y) >>> B.loads [(B_frame, t1(t)*B_frame.x + t2(t)*B_frame.y + t*B_frame.z)] Let's remove t1 from Body B. >>> B.apply_torque(-t1*B.x) >>> B.loads [(B_frame, t2(t)*B_frame.y + t*B_frame.z)] To further demonstrate the use, let us consider two bodies such that a torque `T` is acting on one body, and `-T` on the other. >>> from sympy.physics.mechanics import Body, dynamicsymbols >>> with ignore_warnings(DeprecationWarning): ... N = Body('N') #Newtonion frame ... B1 = Body('B1') ... B2 = Body('B2') >>> v = dynamicsymbols('v') >>> T = v*N.y #Torque Now let's apply equal and opposite torque to the bodies. >>> B1.apply_torque(T, B2) We can check the loads (torques) applied to bodies now. >>> B1.loads [(B1_frame, v(t)*N_frame.y)] >>> B2.loads [(B2_frame, - v(t)*N_frame.y)] Notes ===== If a new torque is applied on body which already has some torque applied on it, then the new torque is added to the previous torque about the body's frame. """
/usr/src/app/target_test_cases/failed_tests_Body.apply_torque.txt
def apply_torque(self, torque, reaction_body=None): """Add torque to the body(s). Explanation =========== Applies the torque on self or equal and opposite torques on self and other body if both are given. The torque applied on other body is taken opposite of self, i.e, -torque. Parameters ========== torque: Vector The torque to be applied. reaction_body: Body, optional Second body on which equal and opposite torque is to be applied. Example ======= As Body has been deprecated, the following examples are for illustrative purposes only. The functionality of Body is fully captured by :class:`~.RigidBody` and :class:`~.Particle`. To ignore the deprecation warning we can use the ignore_warnings context manager. >>> from sympy.utilities.exceptions import ignore_warnings >>> from sympy import symbols >>> from sympy.physics.mechanics import Body, dynamicsymbols >>> t = symbols('t') >>> with ignore_warnings(DeprecationWarning): ... B = Body('B') >>> torque1 = t*B.z >>> B.apply_torque(torque1) >>> B.loads [(B_frame, t*B_frame.z)] We can also remove some part of torque from the body by adding the opposite torque to the body. >>> t1, t2 = dynamicsymbols('t1 t2') >>> B.apply_torque(t1*B.x + t2*B.y) >>> B.loads [(B_frame, t1(t)*B_frame.x + t2(t)*B_frame.y + t*B_frame.z)] Let's remove t1 from Body B. >>> B.apply_torque(-t1*B.x) >>> B.loads [(B_frame, t2(t)*B_frame.y + t*B_frame.z)] To further demonstrate the use, let us consider two bodies such that a torque `T` is acting on one body, and `-T` on the other. >>> from sympy.physics.mechanics import Body, dynamicsymbols >>> with ignore_warnings(DeprecationWarning): ... N = Body('N') #Newtonion frame ... B1 = Body('B1') ... B2 = Body('B2') >>> v = dynamicsymbols('v') >>> T = v*N.y #Torque Now let's apply equal and opposite torque to the bodies. >>> B1.apply_torque(T, B2) We can check the loads (torques) applied to bodies now. >>> B1.loads [(B1_frame, v(t)*N_frame.y)] >>> B2.loads [(B2_frame, - v(t)*N_frame.y)] Notes ===== If a new torque is applied on body which already has some torque applied on it, then the new torque is added to the previous torque about the body's frame. """ if not isinstance(torque, Vector): raise TypeError("A Vector must be supplied to add torque.") if reaction_body is not None: reaction_body.apply_torque(-torque) for load in self._loads: if self.frame in load: torque += load[1] self._loads.remove(load) break self._loads.append((self.frame, torque))
Body.apply_torque
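The same pattern for torques, again a sketch that reuses the deprecated `Body` API from the docstring with arbitrary body names.

```py
from sympy import symbols
from sympy.physics.mechanics import Body
from sympy.utilities.exceptions import ignore_warnings

T = symbols('T')
with ignore_warnings(DeprecationWarning):
    C = Body('C')
    D = Body('D')

C.apply_torque(T*C.z, reaction_body=D)   # -T*C.z is applied to D
print(C.loads)   # torque about C's frame
print(D.loads)   # equal and opposite torque about D's frame
```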
sympy
6
sympy/physics/continuum_mechanics/cable.py
def apply_load(self, order, load): """ This method adds load to the cable. Parameters ========== order : Integer The order of the applied load. - For point loads, order = -1 - For distributed load, order = 0 load : tuple * For point loads, load is of the form (label, x, y, magnitude, direction), where: label : String or symbol The label of the load x : Sympifyable The x coordinate of the position of the load y : Sympifyable The y coordinate of the position of the load magnitude : Sympifyable The magnitude of the load. It must always be positive direction : Sympifyable The angle, in degrees, that the load vector makes with the horizontal in the counter-clockwise direction. It takes the values 0 to 360, inclusive. * For uniformly distributed load, load is of the form (label, magnitude) label : String or symbol The label of the load magnitude : Sympifyable The magnitude of the load. It must always be positive Examples ======== For a point load of magnitude 12 units inclined at 30 degrees with the horizontal: >>> from sympy.physics.continuum_mechanics.cable import Cable >>> c = Cable(('A', 0, 10), ('B', 10, 10)) >>> c.apply_load(-1, ('Z', 5, 5, 12, 30)) >>> c.loads {'distributed': {}, 'point_load': {'Z': [12, 30]}} >>> c.loads_position {'Z': [5, 5]} For a uniformly distributed load of magnitude 9 units: >>> from sympy.physics.continuum_mechanics.cable import Cable >>> c = Cable(('A', 0, 10), ('B', 10, 10)) >>> c.apply_load(0, ('X', 9)) >>> c.loads {'distributed': {'X': 9}, 'point_load': {}} """
/usr/src/app/target_test_cases/failed_tests_Cable.apply_load.txt
def apply_load(self, order, load): """ This method adds load to the cable. Parameters ========== order : Integer The order of the applied load. - For point loads, order = -1 - For distributed load, order = 0 load : tuple * For point loads, load is of the form (label, x, y, magnitude, direction), where: label : String or symbol The label of the load x : Sympifyable The x coordinate of the position of the load y : Sympifyable The y coordinate of the position of the load magnitude : Sympifyable The magnitude of the load. It must always be positive direction : Sympifyable The angle, in degrees, that the load vector makes with the horizontal in the counter-clockwise direction. It takes the values 0 to 360, inclusive. * For uniformly distributed load, load is of the form (label, magnitude) label : String or symbol The label of the load magnitude : Sympifyable The magnitude of the load. It must always be positive Examples ======== For a point load of magnitude 12 units inclined at 30 degrees with the horizontal: >>> from sympy.physics.continuum_mechanics.cable import Cable >>> c = Cable(('A', 0, 10), ('B', 10, 10)) >>> c.apply_load(-1, ('Z', 5, 5, 12, 30)) >>> c.loads {'distributed': {}, 'point_load': {'Z': [12, 30]}} >>> c.loads_position {'Z': [5, 5]} For a uniformly distributed load of magnitude 9 units: >>> from sympy.physics.continuum_mechanics.cable import Cable >>> c = Cable(('A', 0, 10), ('B', 10, 10)) >>> c.apply_load(0, ('X', 9)) >>> c.loads {'distributed': {'X': 9}, 'point_load': {}} """ if order == -1: if len(self._loads["distributed"]) != 0: raise ValueError("Distributed load already exists") label = load[0] if label in self._loads["point_load"]: raise ValueError("Label already exists") x = sympify(load[1]) y = sympify(load[2]) if x > self._right_support[0] or x < self._left_support[0]: raise ValueError("The load should be positioned between the supports") magnitude = sympify(load[3]) direction = sympify(load[4]) self._loads["point_load"][label] = [magnitude, direction] self._loads_position[label] = [x, y] elif order == 0: if len(self._loads_position) != 0: raise ValueError("Point load(s) already exist") label = load[0] if label in self._loads["distributed"]: raise ValueError("Label already exists") magnitude = sympify(load[1]) self._loads["distributed"][label] = magnitude else: raise ValueError("Order should be either -1 or 0")
Cable.apply_load
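A sketch of the validation behaviour that follows from the implementation above (support positions and load values are made up): once a point load exists, adding a distributed load is rejected.

```py
from sympy.physics.continuum_mechanics.cable import Cable

c = Cable(('A', 0, 10), ('B', 10, 10))
c.apply_load(-1, ('P', 3, 5, 20, 270))   # 20 N point load acting straight down at x = 3
print(c.loads)

try:
    c.apply_load(0, ('X', 9))            # mixing load types is not allowed
except ValueError as err:
    print(err)                           # "Point load(s) already exist"
```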
sympy
7
sympy/vector/coordsysrect.py
def orient_new_body(self, name, angle1, angle2, angle3, rotation_order, location=None, vector_names=None, variable_names=None): """ Body orientation takes this coordinate system through three successive simple rotations. Body fixed rotations include both Euler Angles and Tait-Bryan Angles, see https://en.wikipedia.org/wiki/Euler_angles. Parameters ========== name : string The name of the new coordinate system angle1, angle2, angle3 : Expr Three successive angles to rotate the coordinate system by rotation_order : string String defining the order of axes for rotation location : Vector(optional) The location of the new coordinate system's origin wrt this system's origin. If not specified, the origins are taken to be coincident. vector_names, variable_names : iterable(optional) Iterables of 3 strings each, with custom names for base vectors and base scalars of the new system respectively. Used for simple str printing. Examples ======== >>> from sympy.vector import CoordSys3D >>> from sympy import symbols >>> q1, q2, q3 = symbols('q1 q2 q3') >>> N = CoordSys3D('N') A 'Body' fixed rotation is described by three angles and three body-fixed rotation axes. To orient a coordinate system D with respect to N, each sequential rotation is always about the orthogonal unit vectors fixed to D. For example, a '123' rotation will specify rotations about N.i, then D.j, then D.k. (Initially, D.i is the same as N.i) Therefore, >>> D = N.orient_new_body('D', q1, q2, q3, '123') is the same as >>> D = N.orient_new_axis('D', q1, N.i) >>> D = D.orient_new_axis('D', q2, D.j) >>> D = D.orient_new_axis('D', q3, D.k) Acceptable rotation orders are of length 3, expressed in XYZ or 123, and cannot have a rotation about an axis twice in a row. >>> B = N.orient_new_body('B', q1, q2, q3, '123') >>> B = N.orient_new_body('B', q1, q2, 0, 'ZXZ') >>> B = N.orient_new_body('B', 0, 0, 0, 'XYX') """
/usr/src/app/target_test_cases/failed_tests_CoordSys3D.orient_new_body.txt
def orient_new_body(self, name, angle1, angle2, angle3, rotation_order, location=None, vector_names=None, variable_names=None): """ Body orientation takes this coordinate system through three successive simple rotations. Body fixed rotations include both Euler Angles and Tait-Bryan Angles, see https://en.wikipedia.org/wiki/Euler_angles. Parameters ========== name : string The name of the new coordinate system angle1, angle2, angle3 : Expr Three successive angles to rotate the coordinate system by rotation_order : string String defining the order of axes for rotation location : Vector(optional) The location of the new coordinate system's origin wrt this system's origin. If not specified, the origins are taken to be coincident. vector_names, variable_names : iterable(optional) Iterables of 3 strings each, with custom names for base vectors and base scalars of the new system respectively. Used for simple str printing. Examples ======== >>> from sympy.vector import CoordSys3D >>> from sympy import symbols >>> q1, q2, q3 = symbols('q1 q2 q3') >>> N = CoordSys3D('N') A 'Body' fixed rotation is described by three angles and three body-fixed rotation axes. To orient a coordinate system D with respect to N, each sequential rotation is always about the orthogonal unit vectors fixed to D. For example, a '123' rotation will specify rotations about N.i, then D.j, then D.k. (Initially, D.i is the same as N.i) Therefore, >>> D = N.orient_new_body('D', q1, q2, q3, '123') is the same as >>> D = N.orient_new_axis('D', q1, N.i) >>> D = D.orient_new_axis('D', q2, D.j) >>> D = D.orient_new_axis('D', q3, D.k) Acceptable rotation orders are of length 3, expressed in XYZ or 123, and cannot have a rotation about an axis twice in a row. >>> B = N.orient_new_body('B', q1, q2, q3, '123') >>> B = N.orient_new_body('B', q1, q2, 0, 'ZXZ') >>> B = N.orient_new_body('B', 0, 0, 0, 'XYX') """ orienter = BodyOrienter(angle1, angle2, angle3, rotation_order) return self.orient_new(name, orienter, location=location, vector_names=vector_names, variable_names=variable_names)
CoordSys3D.orient_new_body
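A short sketch showing the rotation matrix produced by a body-fixed orientation; the angles and system names are arbitrary, and no particular matrix entries are asserted.

```py
from sympy import symbols, pi
from sympy.vector import CoordSys3D

q1, q2, q3 = symbols('q1 q2 q3')
N = CoordSys3D('N')

D = N.orient_new_body('D', q1, q2, q3, '123')   # symbolic Tait-Bryan '123' rotation
print(D.rotation_matrix(N))

E = N.orient_new_body('E', pi/2, 0, 0, 'ZXZ')   # numeric Euler 'ZXZ' rotation
print(E.rotation_matrix(N))
```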
sympy
8
sympy/stats/stochastic_process_types.py
def canonical_form(self) -> tTuple[tList[Basic], ImmutableMatrix]: """ Reorders the one-step transition matrix so that recurrent states appear first and transient states appear last. Other representations include inserting transient states first and recurrent states last. Returns ======= states, P_new ``states`` is the list that describes the order of the new states in the matrix so that the ith element in ``states`` is the state of the ith row of A. ``P_new`` is the new transition matrix in canonical form. Examples ======== >>> from sympy.stats import DiscreteMarkovChain >>> from sympy import Matrix, S You can convert your chain into canonical form: >>> T = Matrix([[S(1)/2, S(1)/2, 0, 0, 0], ... [S(2)/5, S(1)/5, S(2)/5, 0, 0], ... [0, 0, 1, 0, 0], ... [0, 0, S(1)/2, S(1)/2, 0], ... [S(1)/2, 0, 0, 0, S(1)/2]]) >>> X = DiscreteMarkovChain('X', list(range(1, 6)), trans_probs=T) >>> states, new_matrix = X.canonical_form() >>> states [3, 1, 2, 4, 5] >>> new_matrix Matrix([ [ 1, 0, 0, 0, 0], [ 0, 1/2, 1/2, 0, 0], [2/5, 2/5, 1/5, 0, 0], [1/2, 0, 0, 1/2, 0], [ 0, 1/2, 0, 0, 1/2]]) The new states are [3, 1, 2, 4, 5] and you can create a new chain with this and its canonical form will remain the same (since it is already in canonical form). >>> X = DiscreteMarkovChain('X', states, new_matrix) >>> states, new_matrix = X.canonical_form() >>> states [3, 1, 2, 4, 5] >>> new_matrix Matrix([ [ 1, 0, 0, 0, 0], [ 0, 1/2, 1/2, 0, 0], [2/5, 2/5, 1/5, 0, 0], [1/2, 0, 0, 1/2, 0], [ 0, 1/2, 0, 0, 1/2]]) This is not limited to absorbing chains: >>> T = Matrix([[0, 5, 5, 0, 0], ... [0, 0, 0, 10, 0], ... [5, 0, 5, 0, 0], ... [0, 10, 0, 0, 0], ... [0, 3, 0, 3, 4]])/10 >>> X = DiscreteMarkovChain('X', trans_probs=T) >>> states, new_matrix = X.canonical_form() >>> states [1, 3, 0, 2, 4] >>> new_matrix Matrix([ [ 0, 1, 0, 0, 0], [ 1, 0, 0, 0, 0], [ 1/2, 0, 0, 1/2, 0], [ 0, 0, 1/2, 1/2, 0], [3/10, 3/10, 0, 0, 2/5]]) See Also ======== sympy.stats.DiscreteMarkovChain.communication_classes sympy.stats.DiscreteMarkovChain.decompose References ========== .. [1] https://onlinelibrary.wiley.com/doi/pdf/10.1002/9780470316887.app1 .. [2] http://www.columbia.edu/~ww2040/6711F12/lect1023big.pdf """
/usr/src/app/target_test_cases/failed_tests_DiscreteMarkovChain.canonical_form.txt
def canonical_form(self) -> tTuple[tList[Basic], ImmutableMatrix]: """ Reorders the one-step transition matrix so that recurrent states appear first and transient states appear last. Other representations include inserting transient states first and recurrent states last. Returns ======= states, P_new ``states`` is the list that describes the order of the new states in the matrix so that the ith element in ``states`` is the state of the ith row of A. ``P_new`` is the new transition matrix in canonical form. Examples ======== >>> from sympy.stats import DiscreteMarkovChain >>> from sympy import Matrix, S You can convert your chain into canonical form: >>> T = Matrix([[S(1)/2, S(1)/2, 0, 0, 0], ... [S(2)/5, S(1)/5, S(2)/5, 0, 0], ... [0, 0, 1, 0, 0], ... [0, 0, S(1)/2, S(1)/2, 0], ... [S(1)/2, 0, 0, 0, S(1)/2]]) >>> X = DiscreteMarkovChain('X', list(range(1, 6)), trans_probs=T) >>> states, new_matrix = X.canonical_form() >>> states [3, 1, 2, 4, 5] >>> new_matrix Matrix([ [ 1, 0, 0, 0, 0], [ 0, 1/2, 1/2, 0, 0], [2/5, 2/5, 1/5, 0, 0], [1/2, 0, 0, 1/2, 0], [ 0, 1/2, 0, 0, 1/2]]) The new states are [3, 1, 2, 4, 5] and you can create a new chain with this and its canonical form will remain the same (since it is already in canonical form). >>> X = DiscreteMarkovChain('X', states, new_matrix) >>> states, new_matrix = X.canonical_form() >>> states [3, 1, 2, 4, 5] >>> new_matrix Matrix([ [ 1, 0, 0, 0, 0], [ 0, 1/2, 1/2, 0, 0], [2/5, 2/5, 1/5, 0, 0], [1/2, 0, 0, 1/2, 0], [ 0, 1/2, 0, 0, 1/2]]) This is not limited to absorbing chains: >>> T = Matrix([[0, 5, 5, 0, 0], ... [0, 0, 0, 10, 0], ... [5, 0, 5, 0, 0], ... [0, 10, 0, 0, 0], ... [0, 3, 0, 3, 4]])/10 >>> X = DiscreteMarkovChain('X', trans_probs=T) >>> states, new_matrix = X.canonical_form() >>> states [1, 3, 0, 2, 4] >>> new_matrix Matrix([ [ 0, 1, 0, 0, 0], [ 1, 0, 0, 0, 0], [ 1/2, 0, 0, 1/2, 0], [ 0, 0, 1/2, 1/2, 0], [3/10, 3/10, 0, 0, 2/5]]) See Also ======== sympy.stats.DiscreteMarkovChain.communication_classes sympy.stats.DiscreteMarkovChain.decompose References ========== .. [1] https://onlinelibrary.wiley.com/doi/pdf/10.1002/9780470316887.app1 .. [2] http://www.columbia.edu/~ww2040/6711F12/lect1023big.pdf """ states, A, B, C = self.decompose() O = zeros(A.shape[0], C.shape[1]) return states, BlockMatrix([[A, O], [B, C]]).as_explicit()
DiscreteMarkovChain.canonical_form
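A smaller absorbing chain as an additional sketch (the transition probabilities are made up); the exact reordering is not asserted, only that recurrent states come first.

```py
from sympy import Matrix, S
from sympy.stats import DiscreteMarkovChain

T = Matrix([[1, 0, 0],
            [S(1)/2, 0, S(1)/2],
            [0, S(1)/2, S(1)/2]])
X = DiscreteMarkovChain('X', [0, 1, 2], T)
states, P = X.canonical_form()
print(states)   # the absorbing (recurrent) state 0 is listed first
print(P)
```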
sympy
9
sympy/stats/stochastic_process_types.py
def communication_classes(self) -> tList[tTuple[tList[Basic], Boolean, Integer]]: """ Returns the list of communication classes that partition the states of the markov chain. A communication class is defined to be a set of states such that every state in that set is reachable from every other state in that set. Due to its properties this forms a class in the mathematical sense. Communication classes are also known as recurrence classes. Returns ======= classes The ``classes`` are a list of tuples. Each tuple represents a single communication class with its properties. The first element in the tuple is the list of states in the class, the second element is whether the class is recurrent and the third element is the period of the communication class. Examples ======== >>> from sympy.stats import DiscreteMarkovChain >>> from sympy import Matrix >>> T = Matrix([[0, 1, 0], ... [1, 0, 0], ... [1, 0, 0]]) >>> X = DiscreteMarkovChain('X', [1, 2, 3], T) >>> classes = X.communication_classes() >>> for states, is_recurrent, period in classes: ... states, is_recurrent, period ([1, 2], True, 2) ([3], False, 1) From this we can see that states ``1`` and ``2`` communicate, are recurrent and have a period of 2. We can also see state ``3`` is transient with a period of 1. Notes ===== The algorithm used is of order ``O(n**2)`` where ``n`` is the number of states in the markov chain. It uses Tarjan's algorithm to find the classes themselves and then it uses a breadth-first search algorithm to find each class's periodicity. Most of the algorithm's components approach ``O(n)`` as the matrix becomes more and more sparse. References ========== .. [1] https://web.archive.org/web/20220207032113/https://www.columbia.edu/~ww2040/4701Sum07/4701-06-Notes-MCII.pdf .. [2] https://cecas.clemson.edu/~shierd/Shier/markov.pdf .. [3] https://www.proquest.com/openview/4adc6a51d8371be5b0e4c7dff287fc70/1?pq-origsite=gscholar&cbl=2026366&diss=y .. [4] https://www.mathworks.com/help/econ/dtmc.classify.html """
/usr/src/app/target_test_cases/failed_tests_DiscreteMarkovChain.communication_classes.txt
def communication_classes(self) -> tList[tTuple[tList[Basic], Boolean, Integer]]: """ Returns the list of communication classes that partition the states of the markov chain. A communication class is defined to be a set of states such that every state in that set is reachable from every other state in that set. Due to its properties this forms a class in the mathematical sense. Communication classes are also known as recurrence classes. Returns ======= classes The ``classes`` are a list of tuples. Each tuple represents a single communication class with its properties. The first element in the tuple is the list of states in the class, the second element is whether the class is recurrent and the third element is the period of the communication class. Examples ======== >>> from sympy.stats import DiscreteMarkovChain >>> from sympy import Matrix >>> T = Matrix([[0, 1, 0], ... [1, 0, 0], ... [1, 0, 0]]) >>> X = DiscreteMarkovChain('X', [1, 2, 3], T) >>> classes = X.communication_classes() >>> for states, is_recurrent, period in classes: ... states, is_recurrent, period ([1, 2], True, 2) ([3], False, 1) From this we can see that states ``1`` and ``2`` communicate, are recurrent and have a period of 2. We can also see state ``3`` is transient with a period of 1. Notes ===== The algorithm used is of order ``O(n**2)`` where ``n`` is the number of states in the markov chain. It uses Tarjan's algorithm to find the classes themselves and then it uses a breadth-first search algorithm to find each class's periodicity. Most of the algorithm's components approach ``O(n)`` as the matrix becomes more and more sparse. References ========== .. [1] https://web.archive.org/web/20220207032113/https://www.columbia.edu/~ww2040/4701Sum07/4701-06-Notes-MCII.pdf .. [2] https://cecas.clemson.edu/~shierd/Shier/markov.pdf .. [3] https://www.proquest.com/openview/4adc6a51d8371be5b0e4c7dff287fc70/1?pq-origsite=gscholar&cbl=2026366&diss=y .. [4] https://www.mathworks.com/help/econ/dtmc.classify.html """ n = self.number_of_states T = self.transition_probabilities if isinstance(T, MatrixSymbol): raise NotImplementedError("Cannot perform the operation with a symbolic matrix.") # begin Tarjan's algorithm V = Range(n) # don't use state names. Rather use state # indexes since we use them for matrix # indexing here and later onward E = [(i, j) for i in V for j in V if T[i, j] != 0] classes = strongly_connected_components((V, E)) # end Tarjan's algorithm recurrence = [] periods = [] for class_ in classes: # begin recurrent check (similar to self._check_trans_probs()) submatrix = T[class_, class_] # get the submatrix with those states is_recurrent = S.true rows = submatrix.tolist() for row in rows: if (sum(row) - 1) != 0: is_recurrent = S.false break recurrence.append(is_recurrent) # end recurrent check # begin breadth-first search non_tree_edge_values: tSet[int] = set() visited = {class_[0]} newly_visited = {class_[0]} level = {class_[0]: 0} current_level = 0 done = False # imitate a do-while loop while not done: # runs at most len(class_) times done = len(visited) == len(class_) current_level += 1 # this loop and the while loop above run a combined len(class_) number of times. # so this triple nested loop runs through each of the n states once. 
for i in newly_visited: # the loop below runs len(class_) number of times # complexity is around about O(n * avg(len(class_))) newly_visited = {j for j in class_ if T[i, j] != 0} new_tree_edges = newly_visited.difference(visited) for j in new_tree_edges: level[j] = current_level new_non_tree_edges = newly_visited.intersection(visited) new_non_tree_edge_values = {level[i]-level[j]+1 for j in new_non_tree_edges} non_tree_edge_values = non_tree_edge_values.union(new_non_tree_edge_values) visited = visited.union(new_tree_edges) # igcd needs at least 2 arguments positive_ntev = {val_e for val_e in non_tree_edge_values if val_e > 0} if len(positive_ntev) == 0: periods.append(len(class_)) elif len(positive_ntev) == 1: periods.append(positive_ntev.pop()) else: periods.append(igcd(*positive_ntev)) # end breadth-first search # convert back to the user's state names classes = [[_sympify(self._state_index[i]) for i in class_] for class_ in classes] return list(zip(classes, recurrence, map(Integer,periods)))
DiscreteMarkovChain.communication_classes
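Another sketch, using a deterministic 3-cycle as the transition matrix: all states communicate, the single class is recurrent, and the common period is 3.

```py
from sympy import Matrix
from sympy.stats import DiscreteMarkovChain

T = Matrix([[0, 1, 0],
            [0, 0, 1],
            [1, 0, 0]])
X = DiscreteMarkovChain('X', [1, 2, 3], T)
print(X.communication_classes())   # one recurrent class [1, 2, 3] with period 3
```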
sympy
10
sympy/stats/stochastic_process_types.py
def decompose(self) -> tTuple[tList[Basic], ImmutableMatrix, ImmutableMatrix, ImmutableMatrix]: """ Decomposes the transition matrix into submatrices with special properties. The transition matrix can be decomposed into 4 submatrices: - A - the submatrix from recurrent states to recurrent states. - B - the submatrix from transient to recurrent states. - C - the submatrix from transient to transient states. - O - the submatrix of zeros for recurrent to transient states. Returns ======= states, A, B, C ``states`` - a list of state names with the first being the recurrent states and the last being the transient states in the order of the row names of A and then the row names of C. ``A`` - the submatrix from recurrent states to recurrent states. ``B`` - the submatrix from transient to recurrent states. ``C`` - the submatrix from transient to transient states. Examples ======== >>> from sympy.stats import DiscreteMarkovChain >>> from sympy import Matrix, S One can decompose this chain for example: >>> T = Matrix([[S(1)/2, S(1)/2, 0, 0, 0], ... [S(2)/5, S(1)/5, S(2)/5, 0, 0], ... [0, 0, 1, 0, 0], ... [0, 0, S(1)/2, S(1)/2, 0], ... [S(1)/2, 0, 0, 0, S(1)/2]]) >>> X = DiscreteMarkovChain('X', trans_probs=T) >>> states, A, B, C = X.decompose() >>> states [2, 0, 1, 3, 4] >>> A # recurrent to recurrent Matrix([[1]]) >>> B # transient to recurrent Matrix([ [ 0], [2/5], [1/2], [ 0]]) >>> C # transient to transient Matrix([ [1/2, 1/2, 0, 0], [2/5, 1/5, 0, 0], [ 0, 0, 1/2, 0], [1/2, 0, 0, 1/2]]) This means that state 2 is the only absorbing state (since A is a 1x1 matrix). B is a 4x1 matrix since the 4 remaining transient states all merge into reccurent state 2. And C is the 4x4 matrix that shows how the transient states 0, 1, 3, 4 all interact. See Also ======== sympy.stats.DiscreteMarkovChain.communication_classes sympy.stats.DiscreteMarkovChain.canonical_form References ========== .. [1] https://en.wikipedia.org/wiki/Absorbing_Markov_chain .. [2] https://people.brandeis.edu/~igusa/Math56aS08/Math56a_S08_notes015.pdf """
/usr/src/app/target_test_cases/failed_tests_DiscreteMarkovChain.decompose.txt
def decompose(self) -> tTuple[tList[Basic], ImmutableMatrix, ImmutableMatrix, ImmutableMatrix]: """ Decomposes the transition matrix into submatrices with special properties. The transition matrix can be decomposed into 4 submatrices: - A - the submatrix from recurrent states to recurrent states. - B - the submatrix from transient to recurrent states. - C - the submatrix from transient to transient states. - O - the submatrix of zeros for recurrent to transient states. Returns ======= states, A, B, C ``states`` - a list of state names with the first being the recurrent states and the last being the transient states in the order of the row names of A and then the row names of C. ``A`` - the submatrix from recurrent states to recurrent states. ``B`` - the submatrix from transient to recurrent states. ``C`` - the submatrix from transient to transient states. Examples ======== >>> from sympy.stats import DiscreteMarkovChain >>> from sympy import Matrix, S One can decompose this chain for example: >>> T = Matrix([[S(1)/2, S(1)/2, 0, 0, 0], ... [S(2)/5, S(1)/5, S(2)/5, 0, 0], ... [0, 0, 1, 0, 0], ... [0, 0, S(1)/2, S(1)/2, 0], ... [S(1)/2, 0, 0, 0, S(1)/2]]) >>> X = DiscreteMarkovChain('X', trans_probs=T) >>> states, A, B, C = X.decompose() >>> states [2, 0, 1, 3, 4] >>> A # recurrent to recurrent Matrix([[1]]) >>> B # transient to recurrent Matrix([ [ 0], [2/5], [1/2], [ 0]]) >>> C # transient to transient Matrix([ [1/2, 1/2, 0, 0], [2/5, 1/5, 0, 0], [ 0, 0, 1/2, 0], [1/2, 0, 0, 1/2]]) This means that state 2 is the only absorbing state (since A is a 1x1 matrix). B is a 4x1 matrix since the 4 remaining transient states all merge into reccurent state 2. And C is the 4x4 matrix that shows how the transient states 0, 1, 3, 4 all interact. See Also ======== sympy.stats.DiscreteMarkovChain.communication_classes sympy.stats.DiscreteMarkovChain.canonical_form References ========== .. [1] https://en.wikipedia.org/wiki/Absorbing_Markov_chain .. [2] https://people.brandeis.edu/~igusa/Math56aS08/Math56a_S08_notes015.pdf """ trans_probs = self.transition_probabilities classes = self.communication_classes() r_states = [] t_states = [] for states, recurrent, period in classes: if recurrent: r_states += states else: t_states += states states = r_states + t_states indexes = [self.index_of[state] for state in states] # type: ignore A = Matrix(len(r_states), len(r_states), lambda i, j: trans_probs[indexes[i], indexes[j]]) B = Matrix(len(t_states), len(r_states), lambda i, j: trans_probs[indexes[len(r_states) + i], indexes[j]]) C = Matrix(len(t_states), len(t_states), lambda i, j: trans_probs[indexes[len(r_states) + i], indexes[len(r_states) + j]]) return states, A.as_immutable(), B.as_immutable(), C.as_immutable()
DiscreteMarkovChain.decompose
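A sketch with one absorbing and two transient states (an invented transition matrix); only the shapes of the returned blocks are checked here.

```py
from sympy import Matrix, S
from sympy.stats import DiscreteMarkovChain

T = Matrix([[1, 0, 0],
            [S(1)/3, S(1)/3, S(1)/3],
            [0, S(1)/2, S(1)/2]])
X = DiscreteMarkovChain('X', trans_probs=T)
states, A, B, C = X.decompose()
print(states)                     # recurrent state(s) first, transient states after
print(A.shape, B.shape, C.shape)  # expect (1, 1), (2, 1) and (2, 2) here
```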
sympy
11
sympy/polys/matrices/domainmatrix.py
def inv_den(self, method=None): """ Return the inverse as a :class:`DomainMatrix` with denominator. Returns ======= (inv, den) : (:class:`DomainMatrix`, :class:`~.DomainElement`) The inverse matrix and its denominator. This is more or less equivalent to :meth:`adj_det` except that ``inv`` and ``den`` are not guaranteed to be the adjugate and inverse. The ratio ``inv/den`` is equivalent to ``adj/det`` but some factors might be cancelled between ``inv`` and ``den``. In simple cases this might just be a minus sign so that ``(inv, den) == (-adj, -det)`` but factors more complicated than ``-1`` can also be cancelled. Cancellation is not guaranteed to be complete so ``inv`` and ``den`` may not be on lowest terms. The denominator ``den`` will be zero if and only if the determinant is zero. If the actual adjugate and determinant are needed, use :meth:`adj_det` instead. If the intention is to compute the inverse matrix or solve a system of equations then :meth:`inv_den` is more efficient. Examples ======== >>> from sympy import ZZ >>> from sympy.polys.matrices import DomainMatrix >>> A = DomainMatrix([ ... [ZZ(2), ZZ(-1), ZZ(0)], ... [ZZ(-1), ZZ(2), ZZ(-1)], ... [ZZ(0), ZZ(0), ZZ(2)]], (3, 3), ZZ) >>> Ainv, den = A.inv_den() >>> den 6 >>> Ainv DomainMatrix([[4, 2, 1], [2, 4, 2], [0, 0, 3]], (3, 3), ZZ) >>> A * Ainv == den * A.eye(A.shape, A.domain).to_dense() True Parameters ========== method : str, optional The method to use to compute the inverse. Can be one of ``None``, ``'rref'`` or ``'charpoly'``. If ``None`` then the method is chosen automatically (see :meth:`solve_den` for details). See Also ======== inv det adj_det solve_den """
/usr/src/app/target_test_cases/failed_tests_DomainMatrix.inv_den.txt
def inv_den(self, method=None): """ Return the inverse as a :class:`DomainMatrix` with denominator. Returns ======= (inv, den) : (:class:`DomainMatrix`, :class:`~.DomainElement`) The inverse matrix and its denominator. This is more or less equivalent to :meth:`adj_det` except that ``inv`` and ``den`` are not guaranteed to be the adjugate and inverse. The ratio ``inv/den`` is equivalent to ``adj/det`` but some factors might be cancelled between ``inv`` and ``den``. In simple cases this might just be a minus sign so that ``(inv, den) == (-adj, -det)`` but factors more complicated than ``-1`` can also be cancelled. Cancellation is not guaranteed to be complete so ``inv`` and ``den`` may not be on lowest terms. The denominator ``den`` will be zero if and only if the determinant is zero. If the actual adjugate and determinant are needed, use :meth:`adj_det` instead. If the intention is to compute the inverse matrix or solve a system of equations then :meth:`inv_den` is more efficient. Examples ======== >>> from sympy import ZZ >>> from sympy.polys.matrices import DomainMatrix >>> A = DomainMatrix([ ... [ZZ(2), ZZ(-1), ZZ(0)], ... [ZZ(-1), ZZ(2), ZZ(-1)], ... [ZZ(0), ZZ(0), ZZ(2)]], (3, 3), ZZ) >>> Ainv, den = A.inv_den() >>> den 6 >>> Ainv DomainMatrix([[4, 2, 1], [2, 4, 2], [0, 0, 3]], (3, 3), ZZ) >>> A * Ainv == den * A.eye(A.shape, A.domain).to_dense() True Parameters ========== method : str, optional The method to use to compute the inverse. Can be one of ``None``, ``'rref'`` or ``'charpoly'``. If ``None`` then the method is chosen automatically (see :meth:`solve_den` for details). See Also ======== inv det adj_det solve_den """ I = self.eye(self.shape, self.domain) return self.solve_den(I, method=method)
DomainMatrix.inv_den
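A sketch checking the division-free invariant from the docstring on a different (made-up) 2x2 integer matrix.

```py
from sympy import ZZ
from sympy.polys.matrices import DomainMatrix

A = DomainMatrix([[ZZ(3), ZZ(1)],
                  [ZZ(2), ZZ(4)]], (2, 2), ZZ)
Ainv, den = A.inv_den()
I = A.eye(A.shape, A.domain).to_dense()
assert A * Ainv == den * I        # A * Ainv == den * Identity
```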
sympy
12
sympy/polys/matrices/domainmatrix.py
def scc(self): """Compute the strongly connected components of a DomainMatrix Explanation =========== A square matrix can be considered as the adjacency matrix for a directed graph where the row and column indices are the vertices. In this graph if there is an edge from vertex ``i`` to vertex ``j`` if ``M[i, j]`` is nonzero. This routine computes the strongly connected components of that graph which are subsets of the rows and columns that are connected by some nonzero element of the matrix. The strongly connected components are useful because many operations such as the determinant can be computed by working with the submatrices corresponding to each component. Examples ======== Find the strongly connected components of a matrix: >>> from sympy import ZZ >>> from sympy.polys.matrices import DomainMatrix >>> M = DomainMatrix([[ZZ(1), ZZ(0), ZZ(2)], ... [ZZ(0), ZZ(3), ZZ(0)], ... [ZZ(4), ZZ(6), ZZ(5)]], (3, 3), ZZ) >>> M.scc() [[1], [0, 2]] Compute the determinant from the components: >>> MM = M.to_Matrix() >>> MM Matrix([ [1, 0, 2], [0, 3, 0], [4, 6, 5]]) >>> MM[[1], [1]] Matrix([[3]]) >>> MM[[0, 2], [0, 2]] Matrix([ [1, 2], [4, 5]]) >>> MM.det() -9 >>> MM[[1], [1]].det() * MM[[0, 2], [0, 2]].det() -9 The components are given in reverse topological order and represent a permutation of the rows and columns that will bring the matrix into block lower-triangular form: >>> MM[[1, 0, 2], [1, 0, 2]] Matrix([ [3, 0, 0], [0, 1, 2], [6, 4, 5]]) Returns ======= List of lists of integers Each list represents a strongly connected component. See also ======== sympy.matrices.matrixbase.MatrixBase.strongly_connected_components sympy.utilities.iterables.strongly_connected_components """
/usr/src/app/target_test_cases/failed_tests_DomainMatrix.scc.txt
def scc(self): """Compute the strongly connected components of a DomainMatrix Explanation =========== A square matrix can be considered as the adjacency matrix for a directed graph where the row and column indices are the vertices. In this graph if there is an edge from vertex ``i`` to vertex ``j`` if ``M[i, j]`` is nonzero. This routine computes the strongly connected components of that graph which are subsets of the rows and columns that are connected by some nonzero element of the matrix. The strongly connected components are useful because many operations such as the determinant can be computed by working with the submatrices corresponding to each component. Examples ======== Find the strongly connected components of a matrix: >>> from sympy import ZZ >>> from sympy.polys.matrices import DomainMatrix >>> M = DomainMatrix([[ZZ(1), ZZ(0), ZZ(2)], ... [ZZ(0), ZZ(3), ZZ(0)], ... [ZZ(4), ZZ(6), ZZ(5)]], (3, 3), ZZ) >>> M.scc() [[1], [0, 2]] Compute the determinant from the components: >>> MM = M.to_Matrix() >>> MM Matrix([ [1, 0, 2], [0, 3, 0], [4, 6, 5]]) >>> MM[[1], [1]] Matrix([[3]]) >>> MM[[0, 2], [0, 2]] Matrix([ [1, 2], [4, 5]]) >>> MM.det() -9 >>> MM[[1], [1]].det() * MM[[0, 2], [0, 2]].det() -9 The components are given in reverse topological order and represent a permutation of the rows and columns that will bring the matrix into block lower-triangular form: >>> MM[[1, 0, 2], [1, 0, 2]] Matrix([ [3, 0, 0], [0, 1, 2], [6, 4, 5]]) Returns ======= List of lists of integers Each list represents a strongly connected component. See also ======== sympy.matrices.matrixbase.MatrixBase.strongly_connected_components sympy.utilities.iterables.strongly_connected_components """ if not self.is_square: raise DMNonSquareMatrixError('Matrix must be square for scc') return self.rep.scc()
DomainMatrix.scc
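A sketch on a lower-triangular matrix (arbitrary entries), where every vertex is its own strongly connected component.

```py
from sympy import ZZ
from sympy.polys.matrices import DomainMatrix

M = DomainMatrix([[ZZ(1), ZZ(0), ZZ(0)],
                  [ZZ(2), ZZ(3), ZZ(0)],
                  [ZZ(0), ZZ(4), ZZ(5)]], (3, 3), ZZ)
print(M.scc())                    # three singleton components
```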
sympy
13
sympy/polys/matrices/domainmatrix.py
def solve_den(self, b, method=None): """ Solve matrix equation $Ax = b$ without fractions in the ground domain. Examples ======== Solve a matrix equation over the integers: >>> from sympy import ZZ >>> from sympy.polys.matrices import DM >>> A = DM([[ZZ(1), ZZ(2)], [ZZ(3), ZZ(4)]], ZZ) >>> b = DM([[ZZ(5)], [ZZ(6)]], ZZ) >>> xnum, xden = A.solve_den(b) >>> xden -2 >>> xnum DomainMatrix([[8], [-9]], (2, 1), ZZ) >>> A * xnum == xden * b True Solve a matrix equation over a polynomial ring: >>> from sympy import ZZ >>> from sympy.abc import x, y, z, a, b >>> R = ZZ[x, y, z, a, b] >>> M = DM([[x*y, x*z], [y*z, x*z]], R) >>> b = DM([[a], [b]], R) >>> M.to_Matrix() Matrix([ [x*y, x*z], [y*z, x*z]]) >>> b.to_Matrix() Matrix([ [a], [b]]) >>> xnum, xden = M.solve_den(b) >>> xden x**2*y*z - x*y*z**2 >>> xnum.to_Matrix() Matrix([ [ a*x*z - b*x*z], [-a*y*z + b*x*y]]) >>> M * xnum == xden * b True The solution can be expressed over a fraction field which will cancel gcds between the denominator and the elements of the numerator: >>> xsol = xnum.to_field() / xden >>> xsol.to_Matrix() Matrix([ [ (a - b)/(x*y - y*z)], [(-a*z + b*x)/(x**2*z - x*z**2)]]) >>> (M * xsol).to_Matrix() == b.to_Matrix() True When solving a large system of equations this cancellation step might be a lot slower than :func:`solve_den` itself. The solution can also be expressed as a ``Matrix`` without attempting any polynomial cancellation between the numerator and denominator giving a less simplified result more quickly: >>> xsol_uncancelled = xnum.to_Matrix() / xnum.domain.to_sympy(xden) >>> xsol_uncancelled Matrix([ [ (a*x*z - b*x*z)/(x**2*y*z - x*y*z**2)], [(-a*y*z + b*x*y)/(x**2*y*z - x*y*z**2)]]) >>> from sympy import cancel >>> cancel(xsol_uncancelled) == xsol.to_Matrix() True Parameters ========== self : :class:`DomainMatrix` The ``m x n`` matrix $A$ in the equation $Ax = b$. Underdetermined systems are not supported so ``m >= n``: $A$ should be square or have more rows than columns. b : :class:`DomainMatrix` The ``n x m`` matrix $b$ for the rhs. cp : list of :class:`~.DomainElement`, optional The characteristic polynomial of the matrix $A$. If not given, it will be computed using :meth:`charpoly`. method: str, optional The method to use for solving the system. Can be one of ``None``, ``'charpoly'`` or ``'rref'``. If ``None`` (the default) then the method will be chosen automatically. The ``charpoly`` method uses :meth:`solve_den_charpoly` and can only be used if the matrix is square. This method is division free and can be used with any domain. The ``rref`` method is fraction free but requires exact division in the ground domain (``exquo``). This is also suitable for most domains. This method can be used with overdetermined systems (more equations than unknowns) but not underdetermined systems as a unique solution is sought. Returns ======= (xnum, xden) : (DomainMatrix, DomainElement) The solution of the equation $Ax = b$ as a pair consisting of an ``n x m`` matrix numerator ``xnum`` and a scalar denominator ``xden``. The solution $x$ is given by ``x = xnum / xden``. The division free invariant is ``A * xnum == xden * b``. If $A$ is square then the denominator ``xden`` will be a divisor of the determinant $det(A)$. Raises ====== DMNonInvertibleMatrixError If the system $Ax = b$ does not have a unique solution. See Also ======== solve_den_charpoly solve_den_rref inv_den """
/usr/src/app/target_test_cases/failed_tests_DomainMatrix.solve_den.txt
def solve_den(self, b, method=None): """ Solve matrix equation $Ax = b$ without fractions in the ground domain. Examples ======== Solve a matrix equation over the integers: >>> from sympy import ZZ >>> from sympy.polys.matrices import DM >>> A = DM([[ZZ(1), ZZ(2)], [ZZ(3), ZZ(4)]], ZZ) >>> b = DM([[ZZ(5)], [ZZ(6)]], ZZ) >>> xnum, xden = A.solve_den(b) >>> xden -2 >>> xnum DomainMatrix([[8], [-9]], (2, 1), ZZ) >>> A * xnum == xden * b True Solve a matrix equation over a polynomial ring: >>> from sympy import ZZ >>> from sympy.abc import x, y, z, a, b >>> R = ZZ[x, y, z, a, b] >>> M = DM([[x*y, x*z], [y*z, x*z]], R) >>> b = DM([[a], [b]], R) >>> M.to_Matrix() Matrix([ [x*y, x*z], [y*z, x*z]]) >>> b.to_Matrix() Matrix([ [a], [b]]) >>> xnum, xden = M.solve_den(b) >>> xden x**2*y*z - x*y*z**2 >>> xnum.to_Matrix() Matrix([ [ a*x*z - b*x*z], [-a*y*z + b*x*y]]) >>> M * xnum == xden * b True The solution can be expressed over a fraction field which will cancel gcds between the denominator and the elements of the numerator: >>> xsol = xnum.to_field() / xden >>> xsol.to_Matrix() Matrix([ [ (a - b)/(x*y - y*z)], [(-a*z + b*x)/(x**2*z - x*z**2)]]) >>> (M * xsol).to_Matrix() == b.to_Matrix() True When solving a large system of equations this cancellation step might be a lot slower than :func:`solve_den` itself. The solution can also be expressed as a ``Matrix`` without attempting any polynomial cancellation between the numerator and denominator giving a less simplified result more quickly: >>> xsol_uncancelled = xnum.to_Matrix() / xnum.domain.to_sympy(xden) >>> xsol_uncancelled Matrix([ [ (a*x*z - b*x*z)/(x**2*y*z - x*y*z**2)], [(-a*y*z + b*x*y)/(x**2*y*z - x*y*z**2)]]) >>> from sympy import cancel >>> cancel(xsol_uncancelled) == xsol.to_Matrix() True Parameters ========== self : :class:`DomainMatrix` The ``m x n`` matrix $A$ in the equation $Ax = b$. Underdetermined systems are not supported so ``m >= n``: $A$ should be square or have more rows than columns. b : :class:`DomainMatrix` The ``n x m`` matrix $b$ for the rhs. cp : list of :class:`~.DomainElement`, optional The characteristic polynomial of the matrix $A$. If not given, it will be computed using :meth:`charpoly`. method: str, optional The method to use for solving the system. Can be one of ``None``, ``'charpoly'`` or ``'rref'``. If ``None`` (the default) then the method will be chosen automatically. The ``charpoly`` method uses :meth:`solve_den_charpoly` and can only be used if the matrix is square. This method is division free and can be used with any domain. The ``rref`` method is fraction free but requires exact division in the ground domain (``exquo``). This is also suitable for most domains. This method can be used with overdetermined systems (more equations than unknowns) but not underdetermined systems as a unique solution is sought. Returns ======= (xnum, xden) : (DomainMatrix, DomainElement) The solution of the equation $Ax = b$ as a pair consisting of an ``n x m`` matrix numerator ``xnum`` and a scalar denominator ``xden``. The solution $x$ is given by ``x = xnum / xden``. The division free invariant is ``A * xnum == xden * b``. If $A$ is square then the denominator ``xden`` will be a divisor of the determinant $det(A)$. Raises ====== DMNonInvertibleMatrixError If the system $Ax = b$ does not have a unique solution. 
See Also ======== solve_den_charpoly solve_den_rref inv_den """ m, n = self.shape bm, bn = b.shape if m != bm: raise DMShapeError("Matrix equation shape mismatch.") if method is None: method = 'rref' elif method == 'charpoly' and m != n: raise DMNonSquareMatrixError("method='charpoly' requires a square matrix.") if method == 'charpoly': xnum, xden = self.solve_den_charpoly(b) elif method == 'rref': xnum, xden = self.solve_den_rref(b) else: raise DMBadInputError("method should be 'rref' or 'charpoly'") return xnum, xden
DomainMatrix.solve_den
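A sketch of the documented invariant with an explicit method choice; the matrix and right-hand side are made up, and the exact numerator/denominator pair is not asserted.

```py
from sympy import ZZ
from sympy.polys.matrices import DM

A = DM([[ZZ(2), ZZ(1)], [ZZ(1), ZZ(3)]], ZZ)
b = DM([[ZZ(4)], [ZZ(7)]], ZZ)
xnum, xden = A.solve_den(b, method='rref')
assert A * xnum == xden * b       # division-free invariant
```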
sympy
14
sympy/polys/matrices/domainmatrix.py
def solve_den_charpoly(self, b, cp=None, check=True): """ Solve matrix equation $Ax = b$ using the characteristic polynomial. This method solves the square matrix equation $Ax = b$ for $x$ using the characteristic polynomial without any division or fractions in the ground domain. Examples ======== Solve a matrix equation over the integers: >>> from sympy import ZZ >>> from sympy.polys.matrices import DM >>> A = DM([[ZZ(1), ZZ(2)], [ZZ(3), ZZ(4)]], ZZ) >>> b = DM([[ZZ(5)], [ZZ(6)]], ZZ) >>> xnum, detA = A.solve_den_charpoly(b) >>> detA -2 >>> xnum DomainMatrix([[8], [-9]], (2, 1), ZZ) >>> A * xnum == detA * b True Parameters ========== self : DomainMatrix The ``n x n`` matrix `A` in the equation `Ax = b`. Must be square and invertible. b : DomainMatrix The ``n x m`` matrix `b` for the rhs. cp : list, optional The characteristic polynomial of the matrix `A` if known. If not given, it will be computed using :meth:`charpoly`. check : bool, optional If ``True`` (the default) check that the determinant is not zero and raise an error if it is. If ``False`` then if the determinant is zero the return value will be equal to ``(A.adjugate()*b, 0)``. Returns ======= (xnum, detA) : (DomainMatrix, DomainElement) The solution of the equation `Ax = b` as a matrix numerator and scalar denominator pair. The denominator is equal to the determinant of `A` and the numerator is ``adj(A)*b``. The solution $x$ is given by ``x = xnum / detA``. The division free invariant is ``A * xnum == detA * b``. If ``b`` is the identity matrix, then ``xnum`` is the adjugate matrix and we have ``A * adj(A) == detA * I``. See Also ======== solve_den Main frontend for solving matrix equations with denominator. solve_den_rref Solve matrix equations using fraction-free RREF. inv_den Invert a matrix using the characteristic polynomial. """
/usr/src/app/target_test_cases/failed_tests_DomainMatrix.solve_den_charpoly.txt
def solve_den_charpoly(self, b, cp=None, check=True): """ Solve matrix equation $Ax = b$ using the characteristic polynomial. This method solves the square matrix equation $Ax = b$ for $x$ using the characteristic polynomial without any division or fractions in the ground domain. Examples ======== Solve a matrix equation over the integers: >>> from sympy import ZZ >>> from sympy.polys.matrices import DM >>> A = DM([[ZZ(1), ZZ(2)], [ZZ(3), ZZ(4)]], ZZ) >>> b = DM([[ZZ(5)], [ZZ(6)]], ZZ) >>> xnum, detA = A.solve_den_charpoly(b) >>> detA -2 >>> xnum DomainMatrix([[8], [-9]], (2, 1), ZZ) >>> A * xnum == detA * b True Parameters ========== self : DomainMatrix The ``n x n`` matrix `A` in the equation `Ax = b`. Must be square and invertible. b : DomainMatrix The ``n x m`` matrix `b` for the rhs. cp : list, optional The characteristic polynomial of the matrix `A` if known. If not given, it will be computed using :meth:`charpoly`. check : bool, optional If ``True`` (the default) check that the determinant is not zero and raise an error if it is. If ``False`` then if the determinant is zero the return value will be equal to ``(A.adjugate()*b, 0)``. Returns ======= (xnum, detA) : (DomainMatrix, DomainElement) The solution of the equation `Ax = b` as a matrix numerator and scalar denominator pair. The denominator is equal to the determinant of `A` and the numerator is ``adj(A)*b``. The solution $x$ is given by ``x = xnum / detA``. The division free invariant is ``A * xnum == detA * b``. If ``b`` is the identity matrix, then ``xnum`` is the adjugate matrix and we have ``A * adj(A) == detA * I``. See Also ======== solve_den Main frontend for solving matrix equations with denominator. solve_den_rref Solve matrix equations using fraction-free RREF. inv_den Invert a matrix using the characteristic polynomial. """ A, b = self.unify(b) m, n = self.shape mb, nb = b.shape if m != n: raise DMNonSquareMatrixError("Matrix must be square") if mb != m: raise DMShapeError("Matrix and vector must have the same number of rows") f, detA = self.adj_poly_det(cp=cp) if check and not detA: raise DMNonInvertibleMatrixError("Matrix is not invertible") # Compute adj(A)*b = det(A)*inv(A)*b using Horner's method without # constructing inv(A) explicitly. adjA_b = self.eval_poly_mul(f, b) return (adjA_b, detA)
DomainMatrix.solve_den_charpoly
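As a quick, editor-added usage sketch for the record above (not part of the original record): the snippet below checks the division-free invariant stated in the docstring with a two-column right-hand side, and exercises the documented ``check=False`` behaviour on a singular matrix. It assumes only that ``DM`` and ``solve_den_charpoly`` behave as the docstring describes; nothing here is taken from the library beyond those calls.

```py
# Editor-added sketch: verify the division-free invariant A * xnum == detA * b
# and the documented check=False behaviour. Assumes DM and solve_den_charpoly
# work as described in the docstring above.
from sympy import ZZ
from sympy.polys.matrices import DM

A = DM([[ZZ(1), ZZ(2)], [ZZ(3), ZZ(4)]], ZZ)

# Two right-hand sides solved at once: b is a 2 x 2 matrix.
b = DM([[ZZ(5), ZZ(1)], [ZZ(6), ZZ(0)]], ZZ)

xnum, detA = A.solve_den_charpoly(b)

# The division-free invariant from the docstring.
assert A * xnum == detA * b

# With check=False a singular matrix does not raise; the denominator is zero
# and xnum equals adj(A)*b (per the docstring's description of check=False).
S = DM([[ZZ(1), ZZ(2)], [ZZ(2), ZZ(4)]], ZZ)
ynum, detS = S.solve_den_charpoly(b, check=False)
assert detS == 0
```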
sympy
15
sympy/physics/control/lti.py
def doit(self, cancel=False, expand=False, **hints):
    """
    Returns the resultant transfer function or state space obtained by
    feedback connection of transfer functions or state space objects.

    Examples
    ========

    >>> from sympy.abc import s
    >>> from sympy import Matrix
    >>> from sympy.physics.control.lti import TransferFunction, Feedback, StateSpace
    >>> plant = TransferFunction(3*s**2 + 7*s - 3, s**2 - 4*s + 2, s)
    >>> controller = TransferFunction(5*s - 10, s + 7, s)
    >>> F1 = Feedback(plant, controller)
    >>> F1.doit()
    TransferFunction((s + 7)*(s**2 - 4*s + 2)*(3*s**2 + 7*s - 3), ((s + 7)*(s**2 - 4*s + 2) + (5*s - 10)*(3*s**2 + 7*s - 3))*(s**2 - 4*s + 2), s)
    >>> G = TransferFunction(2*s**2 + 5*s + 1, s**2 + 2*s + 3, s)
    >>> F2 = Feedback(G, TransferFunction(1, 1, s))
    >>> F2.doit()
    TransferFunction((s**2 + 2*s + 3)*(2*s**2 + 5*s + 1), (s**2 + 2*s + 3)*(3*s**2 + 7*s + 4), s)

    Use kwarg ``expand=True`` to expand the resultant transfer function.
    Use ``cancel=True`` to cancel out the common terms in numerator and
    denominator.

    >>> F2.doit(cancel=True, expand=True)
    TransferFunction(2*s**2 + 5*s + 1, 3*s**2 + 7*s + 4, s)
    >>> F2.doit(expand=True)
    TransferFunction(2*s**4 + 9*s**3 + 17*s**2 + 17*s + 3, 3*s**4 + 13*s**3 + 27*s**2 + 29*s + 12, s)

    If the connection contains any ``StateSpace`` object then ``doit()``
    will return the equivalent ``StateSpace`` object.

    >>> A1 = Matrix([[-1.5, -2], [1, 0]])
    >>> B1 = Matrix([0.5, 0])
    >>> C1 = Matrix([[0, 1]])
    >>> A2 = Matrix([[0, 1], [-5, -2]])
    >>> B2 = Matrix([0, 3])
    >>> C2 = Matrix([[0, 1]])
    >>> ss1 = StateSpace(A1, B1, C1)
    >>> ss2 = StateSpace(A2, B2, C2)
    >>> F3 = Feedback(ss1, ss2)
    >>> F3.doit()
    StateSpace(Matrix([
    [-1.5, -2, 0, -0.5],
    [ 1, 0, 0, 0],
    [ 0, 0, 0, 1],
    [ 0, 3, -5, -2]]), Matrix([
    [0.5],
    [ 0],
    [ 0],
    [ 0]]), Matrix([[0, 1, 0, 0]]), Matrix([[0]]))

    """
/usr/src/app/target_test_cases/failed_tests_Feedback.doit.txt
def doit(self, cancel=False, expand=False, **hints):
    """
    Returns the resultant transfer function or state space obtained by
    feedback connection of transfer functions or state space objects.

    Examples
    ========

    >>> from sympy.abc import s
    >>> from sympy import Matrix
    >>> from sympy.physics.control.lti import TransferFunction, Feedback, StateSpace
    >>> plant = TransferFunction(3*s**2 + 7*s - 3, s**2 - 4*s + 2, s)
    >>> controller = TransferFunction(5*s - 10, s + 7, s)
    >>> F1 = Feedback(plant, controller)
    >>> F1.doit()
    TransferFunction((s + 7)*(s**2 - 4*s + 2)*(3*s**2 + 7*s - 3), ((s + 7)*(s**2 - 4*s + 2) + (5*s - 10)*(3*s**2 + 7*s - 3))*(s**2 - 4*s + 2), s)
    >>> G = TransferFunction(2*s**2 + 5*s + 1, s**2 + 2*s + 3, s)
    >>> F2 = Feedback(G, TransferFunction(1, 1, s))
    >>> F2.doit()
    TransferFunction((s**2 + 2*s + 3)*(2*s**2 + 5*s + 1), (s**2 + 2*s + 3)*(3*s**2 + 7*s + 4), s)

    Use kwarg ``expand=True`` to expand the resultant transfer function.
    Use ``cancel=True`` to cancel out the common terms in numerator and
    denominator.

    >>> F2.doit(cancel=True, expand=True)
    TransferFunction(2*s**2 + 5*s + 1, 3*s**2 + 7*s + 4, s)
    >>> F2.doit(expand=True)
    TransferFunction(2*s**4 + 9*s**3 + 17*s**2 + 17*s + 3, 3*s**4 + 13*s**3 + 27*s**2 + 29*s + 12, s)

    If the connection contains any ``StateSpace`` object then ``doit()``
    will return the equivalent ``StateSpace`` object.

    >>> A1 = Matrix([[-1.5, -2], [1, 0]])
    >>> B1 = Matrix([0.5, 0])
    >>> C1 = Matrix([[0, 1]])
    >>> A2 = Matrix([[0, 1], [-5, -2]])
    >>> B2 = Matrix([0, 3])
    >>> C2 = Matrix([[0, 1]])
    >>> ss1 = StateSpace(A1, B1, C1)
    >>> ss2 = StateSpace(A2, B2, C2)
    >>> F3 = Feedback(ss1, ss2)
    >>> F3.doit()
    StateSpace(Matrix([
    [-1.5, -2, 0, -0.5],
    [ 1, 0, 0, 0],
    [ 0, 0, 0, 1],
    [ 0, 3, -5, -2]]), Matrix([
    [0.5],
    [ 0],
    [ 0],
    [ 0]]), Matrix([[0, 1, 0, 0]]), Matrix([[0]]))

    """
    if self.is_StateSpace_object:
        sys1_ss = self.sys1.doit().rewrite(StateSpace)
        sys2_ss = self.sys2.doit().rewrite(StateSpace)

        A1, B1, C1, D1 = sys1_ss.A, sys1_ss.B, sys1_ss.C, sys1_ss.D
        A2, B2, C2, D2 = sys2_ss.A, sys2_ss.B, sys2_ss.C, sys2_ss.D

        # Create identity matrices
        I_inputs = eye(self.num_inputs)
        I_outputs = eye(self.num_outputs)

        # Compute F and its inverse
        F = I_inputs - self.sign * D2 * D1
        E = F.inv()

        # Compute intermediate matrices
        E_D2 = E * D2
        E_C2 = E * C2
        T1 = I_outputs + self.sign * D1 * E_D2
        T2 = I_inputs + self.sign * E_D2 * D1

        A = Matrix.vstack(
            Matrix.hstack(A1 + self.sign * B1 * E_D2 * C1, self.sign * B1 * E_C2),
            Matrix.hstack(B2 * T1 * C1, A2 + self.sign * B2 * D1 * E_C2)
        )
        B = Matrix.vstack(B1 * T2, B2 * D1 * T2)
        C = Matrix.hstack(T1 * C1, self.sign * D1 * E_C2)
        D = D1 * T2

        return StateSpace(A, B, C, D)

    arg_list = list(self.sys1.args) if isinstance(self.sys1, Series) else [self.sys1]

    # F_n and F_d are resultant TFs of num and den of Feedback.
    F_n, unit = self.sys1.doit(), TransferFunction(1, 1, self.sys1.var)
    if self.sign == -1:
        F_d = Parallel(unit, Series(self.sys2, *arg_list)).doit()
    else:
        F_d = Parallel(unit, -Series(self.sys2, *arg_list)).doit()

    _resultant_tf = TransferFunction(F_n.num * F_d.den, F_n.den * F_d.num, F_n.var)

    if cancel:
        _resultant_tf = _resultant_tf.simplify()

    if expand:
        _resultant_tf = _resultant_tf.expand()

    return _resultant_tf
Feedback.doit
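A small, editor-added usage sketch for the record above: it compares negative and positive feedback for the plant/controller pair used in the doctest. The ``sign=1`` keyword is an assumption about ``Feedback``'s constructor (the implementation above branches on ``self.sign``); everything else reuses calls already shown in the docstring.

```py
# Editor-added sketch: negative vs. positive feedback for the doctest's
# plant and controller. sign=1 is an assumption about Feedback's
# constructor; the rest reuses calls from the record above.
from sympy.abc import s
from sympy.physics.control.lti import TransferFunction, Feedback

plant = TransferFunction(3*s**2 + 7*s - 3, s**2 - 4*s + 2, s)
controller = TransferFunction(5*s - 10, s + 7, s)

# Default negative feedback, cancelled and expanded.
neg = Feedback(plant, controller).doit(cancel=True, expand=True)

# Positive feedback (assumed sign=1), cancelled and expanded.
pos = Feedback(plant, controller, sign=1).doit(cancel=True, expand=True)

# Both results are TransferFunction objects; their numerators and
# denominators can be inspected directly.
print(neg.num, neg.den)
print(pos.num, pos.den)
```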
sympy
16
sympy/physics/mechanics/actuator.py
def to_loads(self):
    """Loads required by the equations of motion method classes.

    Explanation
    ===========

    ``KanesMethod`` requires a list of ``Point``-``Vector`` tuples to be
    passed to the ``loads`` parameter of its ``kanes_equations`` method
    when constructing the equations of motion. This method acts as a
    utility to produce the correctly-structured pairs of points and
    vectors required so that these can be easily concatenated with other
    items in the list of loads and passed to
    ``KanesMethod.kanes_equations``. These loads are also in the correct
    form to be passed to the other equations of motion method classes,
    e.g. ``LagrangesMethod``.

    Examples
    ========

    The example below shows how to generate the loads produced by a force
    actuator that follows a linear pathway. In this example we'll assume
    that the force actuator is being used to model a simple linear spring.
    First, create a linear pathway between two points separated by the
    coordinate ``q`` in the ``x`` direction of the global frame ``N``.

    >>> from sympy.physics.mechanics import (LinearPathway, Point,
    ...     ReferenceFrame)
    >>> from sympy.physics.vector import dynamicsymbols
    >>> q = dynamicsymbols('q')
    >>> N = ReferenceFrame('N')
    >>> pA, pB = Point('pA'), Point('pB')
    >>> pB.set_pos(pA, q*N.x)
    >>> pathway = LinearPathway(pA, pB)

    Now create a symbol ``k`` to describe the spring's stiffness and
    instantiate a force actuator that produces a (contractile) force
    proportional to both the spring's stiffness and the pathway's length.
    Note that actuator classes use the sign convention that expansile
    forces are positive, so for a spring to produce a contractile force
    the spring force needs to be calculated as the negative of the
    stiffness multiplied by the length.

    >>> from sympy import symbols
    >>> from sympy.physics.mechanics import ForceActuator
    >>> stiffness = symbols('k')
    >>> spring_force = -stiffness*pathway.length
    >>> spring = ForceActuator(spring_force, pathway)

    The forces produced by the spring can be generated in the list of
    loads form that ``KanesMethod`` (and other equations of motion
    methods) requires by calling the ``to_loads`` method.

    >>> spring.to_loads()
    [(pA, k*q(t)*N.x), (pB, - k*q(t)*N.x)]

    A simple linear damper can be modeled in a similar way. Create another
    symbol ``c`` to describe the damper's damping coefficient. This time
    instantiate a force actuator that produces a force proportional to
    both the damper's damping coefficient and the pathway's extension
    velocity. Note that the damping force is negative as it acts in the
    direction opposite to the damper's change in length.

    >>> damping_coefficient = symbols('c')
    >>> damping_force = -damping_coefficient*pathway.extension_velocity
    >>> damper = ForceActuator(damping_force, pathway)

    Again, the forces produced by the damper can be generated by calling
    the ``to_loads`` method.

    >>> damper.to_loads()
    [(pA, c*Derivative(q(t), t)*N.x), (pB, - c*Derivative(q(t), t)*N.x)]

    """
/usr/src/app/target_test_cases/failed_tests_ForceActuator.to_loads.txt
def to_loads(self):
    """Loads required by the equations of motion method classes.

    Explanation
    ===========

    ``KanesMethod`` requires a list of ``Point``-``Vector`` tuples to be
    passed to the ``loads`` parameter of its ``kanes_equations`` method
    when constructing the equations of motion. This method acts as a
    utility to produce the correctly-structured pairs of points and
    vectors required so that these can be easily concatenated with other
    items in the list of loads and passed to
    ``KanesMethod.kanes_equations``. These loads are also in the correct
    form to be passed to the other equations of motion method classes,
    e.g. ``LagrangesMethod``.

    Examples
    ========

    The example below shows how to generate the loads produced by a force
    actuator that follows a linear pathway. In this example we'll assume
    that the force actuator is being used to model a simple linear spring.
    First, create a linear pathway between two points separated by the
    coordinate ``q`` in the ``x`` direction of the global frame ``N``.

    >>> from sympy.physics.mechanics import (LinearPathway, Point,
    ...     ReferenceFrame)
    >>> from sympy.physics.vector import dynamicsymbols
    >>> q = dynamicsymbols('q')
    >>> N = ReferenceFrame('N')
    >>> pA, pB = Point('pA'), Point('pB')
    >>> pB.set_pos(pA, q*N.x)
    >>> pathway = LinearPathway(pA, pB)

    Now create a symbol ``k`` to describe the spring's stiffness and
    instantiate a force actuator that produces a (contractile) force
    proportional to both the spring's stiffness and the pathway's length.
    Note that actuator classes use the sign convention that expansile
    forces are positive, so for a spring to produce a contractile force
    the spring force needs to be calculated as the negative of the
    stiffness multiplied by the length.

    >>> from sympy import symbols
    >>> from sympy.physics.mechanics import ForceActuator
    >>> stiffness = symbols('k')
    >>> spring_force = -stiffness*pathway.length
    >>> spring = ForceActuator(spring_force, pathway)

    The forces produced by the spring can be generated in the list of
    loads form that ``KanesMethod`` (and other equations of motion
    methods) requires by calling the ``to_loads`` method.

    >>> spring.to_loads()
    [(pA, k*q(t)*N.x), (pB, - k*q(t)*N.x)]

    A simple linear damper can be modeled in a similar way. Create another
    symbol ``c`` to describe the damper's damping coefficient. This time
    instantiate a force actuator that produces a force proportional to
    both the damper's damping coefficient and the pathway's extension
    velocity. Note that the damping force is negative as it acts in the
    direction opposite to the damper's change in length.

    >>> damping_coefficient = symbols('c')
    >>> damping_force = -damping_coefficient*pathway.extension_velocity
    >>> damper = ForceActuator(damping_force, pathway)

    Again, the forces produced by the damper can be generated by calling
    the ``to_loads`` method.

    >>> damper.to_loads()
    [(pA, c*Derivative(q(t), t)*N.x), (pB, - c*Derivative(q(t), t)*N.x)]

    """
    return self.pathway.to_loads(self.force)
ForceActuator.to_loads
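Finally, a brief editor-added sketch showing how the spring and damper loads from the docstring above can be combined into a single loads list. The concatenation pattern is an assumed typical usage rather than something stated in the record, and the hand-off to ``KanesMethod`` is deliberately omitted to avoid assuming the rest of that API.

```py
# Editor-added sketch: build a combined loads list from the spring and damper
# defined in the docstring above. Only to_loads and list concatenation are
# used; passing the list on to an equations of motion method is left out.
from sympy import symbols
from sympy.physics.mechanics import (ForceActuator, LinearPathway, Point,
                                     ReferenceFrame)
from sympy.physics.vector import dynamicsymbols

q = dynamicsymbols('q')
N = ReferenceFrame('N')
pA, pB = Point('pA'), Point('pB')
pB.set_pos(pA, q*N.x)
pathway = LinearPathway(pA, pB)

k, c = symbols('k c')
spring = ForceActuator(-k*pathway.length, pathway)
damper = ForceActuator(-c*pathway.extension_velocity, pathway)

# Each actuator contributes one (Point, Vector) pair per pathway endpoint,
# so the two lists can simply be concatenated.
loads = spring.to_loads() + damper.to_loads()
assert len(loads) == 4
```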