import json

import datasets

logger = datasets.logging.get_logger(__name__)

_DESCRIPTION = """\
Every Prompt dataset.
Every Prompt is a data-driven approach to mining instructions from the web.
It contains over a million FAQs and HowTos from around the world in a structured format.
It also has basic pre-processing to calculate the length of the useful text and identify
the language of that text with the help of GCLD3.
"""

_URLS = [
    "every_prompt.jsonlines",
]


class EveryPromptDataset(datasets.GeneratorBasedBuilder):
    """Every Prompt Dataset"""

    VERSION = datasets.Version("1.0.0")
    DEFAULT_CONFIG_NAME = "default"

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="default", version=VERSION, description=""),
    ]

    def _info(self):
        # Describe the schema of a single example: language/length metadata
        # plus the source URL, schema type, and the raw structured payload.
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "language": datasets.Value("string"),
                    "language_is_reliable": datasets.Value("bool"),
                    "text_length": datasets.Value("int32"),
                    "data_length": datasets.Value("int32"),
                    "text_to_data_ratio": datasets.Value("float32"),
                    "url": datasets.Value("string"),
                    "schema_type": datasets.Value("string"),
                    "payload": datasets.Value("string"),
                }
            ),
        )

    def _split_generators(self, dl_manager):
        # Download (or locate) the JSON Lines file and expose it as a single train split.
        downloaded_files = dl_manager.download(_URLS)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"filepaths": downloaded_files}
            )
        ]

    def _generate_examples(self, filepaths):
        """This function returns the examples in the raw (text) form."""
        logger.info("generating examples from = %s", filepaths)
        key = 0

        # Each line of the JSON Lines file is one instruction record.
        for path in filepaths:
            with open(path, encoding="utf-8") as f:
                for instruction_str in f:
                    instruction = json.loads(instruction_str)
                    yield key, instruction
                    key += 1
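

# A minimal usage sketch, not part of the builder itself. It assumes this file lives
# alongside "every_prompt.jsonlines" and that a `datasets` version supporting local
# loading scripts is installed; the field names printed match the features declared above.
if __name__ == "__main__":
    dataset = datasets.load_dataset(__file__, split="train")
    # Each example carries the raw structured payload plus language/length metadata.
    print(dataset[0]["language"], dataset[0]["schema_type"])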