"""RedPajama V2: Quality annotated Web Text Documents.""" |
|
|
|
import json |
|
|
|
import datasets |
|
import traceback |
|
import os |
|
import gzip |
|
from typing import List |
|
|
|
logger = datasets.logging.get_logger(__name__) |
|
_DESCRIPTION = """\
RedPajama V2: an Open Dataset for Training Large Language Models
"""

_URL_BASE = "https://data.together.xyz/redpajama-data-v2/v1.0.0"
_LANGUAGES = ("en", "de", "fr", "es", "it")
_LISTINGS_PATTERN = "listings/{language}-{snapshot}-{partition}.txt"
|
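# CommonCrawl snapshot identifiers (year-week of the crawl) covered by the
# full dataset.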
_CC_SNAPSHOT_IDS = (
    "2014-15",
    "2014-23",
    "2014-35",
    "2014-41",
    "2014-42",
    "2014-49",
    "2014-52",
    "2015-14",
    "2015-22",
    "2015-27",
    "2015-32",
    "2015-35",
    "2015-40",
    "2015-48",
    "2016-07",
    "2016-18",
    "2016-22",
    "2016-26",
    "2016-30",
    "2016-36",
    "2016-40",
    "2016-44",
    "2016-50",
    "2017-04",
    "2017-09",
    "2017-17",
    "2017-22",
    "2017-26",
    "2017-30",
    "2017-34",
    "2017-39",
    "2017-43",
    "2017-47",
    "2017-51",
    "2018-05",
    "2018-09",
    "2018-13",
    "2018-17",
    "2018-22",
    "2018-26",
    "2018-30",
    "2018-34",
    "2018-39",
    "2018-43",
    "2018-47",
    "2018-51",
    "2019-04",
    "2019-09",
    "2019-13",
    "2019-18",
    "2019-22",
    "2019-26",
    "2019-30",
    "2019-35",
    "2019-39",
    "2019-43",
    "2019-47",
    "2019-51",
    "2020-05",
    "2020-10",
    "2020-16",
    "2020-24",
    "2020-29",
    "2020-34",
    "2020-40",
    "2020-45",
    "2020-50",
    "2021-04",
    "2021-10",
    "2021-17",
    "2021-21",
    "2021-25",
    "2021-31",
    "2021-39",
    "2021-43",
    "2021-49",
    "2022-05",
    "2022-21",
    "2022-27",
    "2022-33",
    "2022-40",
    "2022-49",
    "2023-06",
    "2023-14",
)
|
|
class RedPajamaDataV2Config(datasets.BuilderConfig):
    """BuilderConfig for RedPajama V2."""

    def __init__(self, *args, **kwargs):
        """BuilderConfig for RedPajama V2.

        Args:
            **kwargs: keyword arguments forwarded to super; the keys
                `partition`, `snapshots`, and `languages` are consumed
                here and stored on the config.
        """
        # Pop the custom keys before calling the parent constructor:
        # datasets.BuilderConfig does not accept them and would otherwise
        # raise a TypeError on unexpected keyword arguments.
        self.partition: str = kwargs.pop("partition", "all")
        self.snapshots: List[str] = kwargs.pop("snapshots", _CC_SNAPSHOT_IDS)
        self.languages: List[str] = kwargs.pop("languages", _LANGUAGES)
        super().__init__(*args, **kwargs)
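
    # A hedged sketch: because the custom keys are popped before the parent
    # constructor runs, a narrowed config could be built directly, e.g.
    # (the name "default-en" is illustrative, not a shipped config):
    #
    #   config = RedPajamaDataV2Config(
    #       name="default-en",
    #       version=datasets.Version("1.0.0", ""),
    #       partition="head_middle",
    #       snapshots=["2023-14"],
    #       languages=["en"],
    #   )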
|
|
class RedPajamaV2(datasets.GeneratorBasedBuilder):
    """RedPajama V2: Quality annotated Web Text Documents."""

    DEFAULT_CONFIG_NAME = "default"

    BUILDER_CONFIGS = [
        RedPajamaDataV2Config(
            name="default",
            version=datasets.Version("1.0.0", ""),
            description="RedPajamaV2",
        ),
        RedPajamaDataV2Config(
            name="sample",
            version=datasets.Version("1.0.0", ""),
            description="RedPajamaV2 Sample",
        ),
    ]
|
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "raw_content": datasets.Value("string"),
                    "doc_id": datasets.Value("string"),
                    "meta": datasets.Value("string"),
                    "quality_signals": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
        )
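
    # Note: `meta` and `quality_signals` are emitted as JSON-encoded strings
    # rather than nested features; consumers are expected to decode them,
    # e.g. (illustrative, `example` being any yielded record):
    #
    #   meta = json.loads(example["meta"])
    #   quality_signals = json.loads(example["quality_signals"])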
|
    def _split_generators_sample(self, dl_manager):
        # The sample config ships its own listings file, which enumerates
        # the sample shards; paths are relative to this script.
        sample_listings = dl_manager.download_and_extract(
            "sample/sample_listings.txt"
        )
        with open(sample_listings, "r", encoding="utf-8") as fd:
            listings = [line.strip() for line in fd]

        documents_files = dl_manager.download({
            "head_middle": [
                f"sample/documents/{lst}.json.gz" for lst in listings
            ]
        })

        quality_signals_files = dl_manager.download({
            "head_middle": [
                f"sample/quality_signals/{lst}.signals.json.gz"
                for lst in listings
            ]
        })

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "listings_ids": {"head_middle": listings},
                    "documents_files": documents_files,
                    "quality_signals_files": quality_signals_files,
                },
            )
        ]
|
    def _split_generators_full(self, dl_manager):
        snapshots = getattr(self.config, "snapshots", _CC_SNAPSHOT_IDS)
        languages = getattr(self.config, "languages", _LANGUAGES)
        partition = getattr(self.config, "partition", "all")

        partitions = {
            "all": ["head_middle", "tail"]
        }.get(partition, [partition])

        # Build one listings url per (partition, snapshot, language)
        # combination.
        listings_files_urls = {}
        for part in partitions:
            listings_files_urls[part] = []
            for snapshot_id in snapshots:
                for lang in languages:
                    listings_files_urls[part].append(
                        _LISTINGS_PATTERN.format(
                            language=lang,
                            snapshot=snapshot_id,
                            partition=part,
                        )
                    )

        listings_files = dl_manager.download_and_extract(listings_files_urls)

        # Each listings file contains one shard id per line.
        listings_ids = {}
        for part, part_listings_files in listings_files.items():
            listings_ids[part] = []
            for listings_file in part_listings_files:
                with open(listings_file, encoding="utf-8") as f:
                    listings_ids[part].extend(line.strip() for line in f)

        # Resolve shard ids into document and quality-signal urls; quality
        # signals only exist for the head_middle partition.
        document_urls = {}
        quality_signals_urls = {}
        for part, part_listings_ids in listings_ids.items():
            document_urls[part] = []
            quality_signals_urls[part] = []
            for lst_id in part_listings_ids:
                document_urls[part].append(
                    os.path.join(_URL_BASE, f"documents/{lst_id}.json.gz")
                )
                if part != "head_middle":
                    continue
                quality_signals_urls[part].append(
                    os.path.join(
                        _URL_BASE, f"quality_signals/{lst_id}.signals.json.gz"
                    )
                )

        documents_files = dl_manager.download(document_urls)
        quality_signals_files = dl_manager.download(quality_signals_urls)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "listings_ids": listings_ids,
                    "documents_files": documents_files,
                    "quality_signals_files": quality_signals_files,
                },
            )
        ]
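
    # For reference, the urls resolved above have the shape
    #   {_URL_BASE}/documents/{lst_id}.json.gz
    #   {_URL_BASE}/quality_signals/{lst_id}.signals.json.gz
    # where `lst_id` is a shard id read from the downloaded listings files.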
|
    def _split_generators(self, dl_manager):
        if self.config.name.endswith("sample"):
            return self._split_generators_sample(dl_manager)
        return self._split_generators_full(dl_manager)
|
    def _generate_examples(
        self, listings_ids, documents_files, quality_signals_files
    ):
        key = 0
        for part in documents_files.keys():
            part_docs_files = documents_files[part]
            part_qs_files = quality_signals_files[part]
            part_listings_ids = listings_ids[part]

            # Shards without quality signals (i.e., the tail partition)
            # are handled separately.
            if len(part_qs_files) == 0:
                for sample in self._handle_tail_partition(
                    part, part_docs_files, part_listings_ids
                ):
                    yield key, sample
                    key += 1
                continue

            for sample in self._handle_head_middle_partition(
                part, part_docs_files, part_qs_files, part_listings_ids
            ):
                yield key, sample
                key += 1
|
    def _handle_tail_partition(self, part, docs_files, listings_ids):
        for doc_file, listing_id in zip(docs_files, listings_ids):
            with gzip.open(doc_file, "rt", encoding="utf-8") as df:
                for row, doc in enumerate(df):
                    doc_id = f"{listing_id}.json.gz/{row}"
                    try:
                        yield self.handle_record(part, doc_id, doc, None)
                    except Exception:
                        logger.error(f"failed handling row {row} in {doc_file}")
                        traceback.print_exc()
                        raise
|
    def _handle_head_middle_partition(
        self, part, docs_files, qs_files, listings_ids
    ):
        assert len(docs_files) == len(qs_files)

        listings_ids = listings_ids[:len(docs_files)]

        for doc_file, qs_file, listings_id in zip(
            docs_files, qs_files, listings_ids
        ):
            try:
                with gzip.open(doc_file, "rt", encoding="utf-8") as df, \
                        gzip.open(qs_file, "rt", encoding="utf-8") as qf:
                    # Documents and quality signals are aligned row by row.
                    for row, (doc, qs) in enumerate(zip(df, qf)):
                        doc_id = f"{listings_id}.json.gz/{row}"
                        try:
                            yield self.handle_record(part, doc_id, doc, qs)
                        except Exception:
                            logger.error(
                                f"failed handling row {row} in "
                                f"{doc_file} ({qs_file})"
                            )
                            traceback.print_exc()
                            continue
            except gzip.BadGzipFile:
                # Skip shards that fail to decompress.
                logger.error(f"BadGzipFile: {doc_file}, {qs_file}")
                traceback.print_exc()
                continue
|
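    # Merges one document line with its (optional) quality-signals line into
    # a flat example; `meta` and `quality_signals` are re-encoded as JSON
    # strings to match the features declared in `_info`.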
    @staticmethod
    def handle_record(part, doc_id, doc, qs):
        doc = json.loads(doc)
        qs = json.loads(qs) if qs is not None else {}

        meta = {
            "url": doc["url"],
            "partition": part,
            "language": doc["language"],
            "source_domain": doc["source_domain"],
            "date_download": doc["date_download"],
            "digest": doc["digest"],
        }

        quality_signals = json.dumps(qs.get("quality_signals", {}))

        return {
            "raw_content": doc["raw_content"],
            "doc_id": doc_id,
            "meta": json.dumps(meta),
            "quality_signals": quality_signals,
        }
|
|
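# A minimal usage sketch (assumptions: this script is saved locally as
# `RedPajama-Data-V2.py` -- the file name is illustrative -- and newer
# versions of `datasets` may additionally require `trust_remote_code=True`):
#
#   from datasets import load_dataset
#
#   ds = load_dataset(
#       "RedPajama-Data-V2.py",
#       name="default",
#       partition="head_middle",
#       snapshots=["2023-14"],
#       languages=["en"],
#       streaming=True,
#   )
#   for example in ds["train"].take(1):
#       print(example["doc_id"])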