import json import tarfile from datasets import DatasetInfo, DatasetBuilder, DownloadManager, BuilderConfig, SplitGenerator, Split, Version import datasets import os import requests import re from tqdm import tqdm SUPPORTED_YEARS = ["1774"] # Add years from 1798 to 1964 to the supported years SUPPORTED_YEARS = SUPPORTED_YEARS + [str(year) for year in range(1798, 1964)] def make_year_file_splits(): """ Collects a list of available files for each year. Returns: dict: A dictionary mapping each year to its corresponding file URL. list: A list of years. """ base_url = "https://huggingface.co/datasets/dell-research-harvard/AmericanStories/resolve/main/" # Make a list of years from 1774 to 1960 year_list = [str(year) for year in range(1774, 1960)] data_files = [f"faro_{year}.tar.gz" for year in year_list] url_list = [base_url + file for file in data_files] splits = {year: file for year, file in zip(year_list, url_list)} years = year_list return splits, years _CITATION = """\ Coming Soon """ _DESCRIPTION = """\ American Stories offers high-quality structured data from historical newspapers suitable for pre-training large language models to enhance the understanding of historical English and world knowledge. It can also be integrated into external databases of retrieval-augmented language models, enabling broader access to historical information, including interpretations of political events and intricate details about people's ancestors. Additionally, the structured article texts facilitate the application of transformer-based methods for popular tasks like detecting reproduced content, significantly improving accuracy compared to traditional OCR methods. American Stories serves as a substantial and valuable dataset for advancing multimodal layout analysis models and other multimodal applications. """ _FILE_DICT, _YEARS = make_year_file_splits() class CustomBuilderConfig(datasets.BuilderConfig): """BuilderConfig for AmericanStories dataset with different configurations.""" def __init__(self, year_list=None,features=["article_id", "newspaper_name", "edition", "date", "page", "headline", "byline", "article"], **kwargs): """ BuilderConfig for AmericanStories dataset. Args: year_list (list): A list of years to include in the dataset. **kwargs: Additional keyword arguments forwarded to the superclass. """ super(CustomBuilderConfig, self).__init__(**kwargs) self.year_list = year_list self.features = features class AmericanStories(datasets.GeneratorBasedBuilder): """Dataset builder class for AmericanStories dataset.""" VERSION = datasets.Version("0.1.0") BUILDER_CONFIGS = [ CustomBuilderConfig( name="all_years", version=VERSION, description="All years in the dataset" ), CustomBuilderConfig( name="subset_years", version=VERSION, description="Subset of years in the dataset", year_list=["1774", "1804"] ), CustomBuilderConfig( name="all_years_content_regions", version=VERSION, description="All years in the dataset", ), CustomBuilderConfig( name="subset_years_content_regions", version=VERSION, description="Subset of years in the dataset", year_list=["1774", "1804"], ) ] DEFAULT_CONFIG_NAME = "subset_years" def _info(self): """ Specifies the DatasetInfo object for the AmericanStories dataset. Returns: datasets.DatasetInfo: The DatasetInfo object. """ if not self.config.name.endswith("content_regions"): features = datasets.Features( { "article_id": datasets.Value("string"), "newspaper_name": datasets.Value("string"), "edition": datasets.Value("string"), "date": datasets.Value("string"), "page": datasets.Value("string"), "headline": datasets.Value("string"), "byline": datasets.Value("string"), "article": datasets.Value("string"), } ) else: features = datasets.Features( { "raw_data_string": datasets.Value("string"), } ) return datasets.DatasetInfo( description=_DESCRIPTION, features=features, citation=_CITATION, ) def _split_generators(self, dl_manager): """ Downloads and extracts the data, and defines the dataset splits. Args: dl_manager (datasets.DownloadManager): The DownloadManager instance. Returns: list: A list of SplitGenerator objects. """ if self.config.name == "subset_years": print("Only taking a subset of years. Change name to 'all_years' to use all years in the dataset.") if not self.config.year_list: raise ValueError("Please provide a valid year_list") elif not set(self.config.year_list).issubset(set(SUPPORTED_YEARS)): raise ValueError(f"Only {SUPPORTED_YEARS} are supported. Please provide a valid year_list") urls = _FILE_DICT year_list = _YEARS # Subset _FILE_DICT and year_list to only include years in config.year_list if self.config.year_list: urls = {year: urls[year] for year in self.config.year_list} year_list = self.config.year_list data_dir = dl_manager.download_and_extract(urls) # Return a list of splits, where each split corresponds to a year return [ datasets.SplitGenerator( name=year, gen_kwargs={ "year_dir": os.path.join(data_dir[year], "mnt", "122a7683-fa4b-45dd-9f13-b18cc4f4a187", "ca_rule_based_fa_clean", "faro_" + year), "split": year, "associated": True if not self.config.name.endswith("content_regions") else False, }, ) for year in year_list ] def _generate_examples(self, year_dir,split, associated): """ Generates examples for the specified year and split. Args: year_dir (str): The directory path for the year. associated (bool): Whether or not the output should be contents associated into an "article" or raw contents. Yields: tuple: The key-value pair containing the example ID and the example data. """ print("Associated: " + str(associated)) if associated: for filepath in os.listdir(year_dir): with open(os.path.join(year_dir, filepath), encoding="utf-8") as f: try : data = json.load(f) except: print("Error loading file: " + filepath) continue if "lccn" in data.keys(): scan_id = filepath.split('.')[0] scan_date = filepath.split("_")[0] scan_page = filepath.split("_")[1] scan_edition = filepath.split("_")[-2][8:] newspaper_name = data["lccn"]["title"] full_articles_in_data = data["full articles"] for article in full_articles_in_data: article_id = str(article["full_article_id"]) + "_" + scan_id yield article_id, { "article_id": article_id, "newspaper_name": newspaper_name, "edition": scan_edition, "date": scan_date, "page": scan_page, "headline": article["headline"], "byline": article["byline"], "article": article["article"], } else: print("Returning a json as a string, feel free to parse it yourself!") for filepath in os.listdir(year_dir): with open(os.path.join(year_dir, filepath), encoding="utf-8") as f: try : data = json.load(f) except: # print("Error loading file: " + filepath) continue ###Convert json to strng data=json.dumps(data) print((data)) print(type(data)) scan_id=filepath.split('.')[0] ##Yield the scan id and the raw data string yield scan_id, { "raw_data_string": str(data) }